| code (string, 82 to 54.1k chars) | code_codestyle (int64, 0 to 699) | style_context (string, 111 to 35.6k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
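# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of calling this metric without the bootstrap
# aggregator, so per-example scores come back as lists instead of
# AggregateScore objects. Assumes `datasets` and `rouge_score` are installed.
def _example_compute_without_aggregator():
    import datasets

    rouge = datasets.load_metric("rouge")
    results = rouge.compute(
        predictions=["hello there", "general kenobi"],
        references=["hello there", "general kenobi"],
        use_aggregator=False,
    )
    # With use_aggregator=False, each value is a list of per-example Score tuples.
    print(results["rouge1"])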
| 20 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
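# --- Usage sketch (not part of the original test file) ---
# A hedged, minimal driver for DisjunctiveConstraint outside of a test case,
# mirroring the update()/current_seq protocol exercised above.
def _example_disjunctive_walkthrough():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
        print(token, stepped, completed, reset, dc.current_seq)
    # After consuming [1, 2, 4], one branch is fully matched.
    assert dc.completed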
| 664 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens), one per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file to save
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
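# --- Usage sketch (not part of the original file; lazy import for illustration) ---
# Hedged round-trip example: ByT5 tokenizes raw UTF-8 bytes, offset by the
# three special tokens (<pad>=0, </s>=1, <unk>=2) defined above. The hub
# checkpoint name "google/byt5-small" is an assumption about an available repo.
def _example_byte_round_trip():
    from transformers import ByT5Tokenizer

    tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
    ids = tokenizer("hi").input_ids
    # ord("h") + 3 = 107, ord("i") + 3 = 108, then </s> (id 1) is appended.
    print(ids)  # expected: [107, 108, 1]
    print(tokenizer.decode(ids, skip_special_tokens=True))  # "hi"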
| 21 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
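# --- Usage sketch (not part of the original test file) ---
# Hedged illustration of PretrainedConfig.update_from_string, the helper
# exercised in test_config_from_string above: it parses "key=value" pairs and
# casts each value to the type of the existing attribute.
def _example_update_from_string():
    from transformers import GPT2Config

    c = GPT2Config()
    c.update_from_string("n_embd=512,resid_pdrop=0.2,scale_attn_weights=false")
    print(c.n_embd, c.resid_pdrop, c.scale_attn_weights)  # 512 0.2 False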
| 664 | 0 |
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """
    Build and simulate a quantum full adder circuit for the given inputs.
    An entry of 2 puts the corresponding qubit into superposition via a
    Hadamard gate; the measured classical bits are (sum, carry_out).
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
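# --- Cross-check sketch (not part of the original file) ---
# A hedged classical reference implementation of the same full adder, handy
# for sanity-checking the measured bits when all inputs are definite (0 or 1).
def _classical_full_adder(input_1: int, input_2: int, carry_in: int) -> tuple:
    total = input_1 + input_2 + carry_in
    return total // 2, total % 2  # (carry_out, sum)
# e.g. _classical_full_adder(1, 1, 1) == (1, 1), matching the "11" state that
# quantum_full_adder(1, 1, 1) should measure with (near) all of its shots.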
| 22 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
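# --- Usage sketch (not part of the original test file) ---
# Hedged standalone example of the preprocessing exercised above: resize and
# normalize a dummy PIL image into a (1, 3, 18, 18) float tensor.
def _example_preprocess_dummy_image():
    image_processor = DPTImageProcessor(size={"height": 18, "width": 18})
    image = Image.new("RGB", (32, 32), color=(128, 64, 0))
    pixel_values = image_processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])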
| 664 | 0 |
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
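# --- Numerical sanity-check sketch (not part of the original file) ---
# Hedged check that sigmoid_derivative matches a finite-difference estimate of
# sigmoid's slope, since back_propagation relies on that identity: the
# derivative of sigmoid at x equals s * (1 - s) where s = sigmoid(x).
def _check_sigmoid_derivative(x: float = 0.3, eps: float = 1e-6) -> None:
    analytic = sigmoid_derivative(sigmoid(x))
    numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
    assert abs(analytic - numeric) < 1e-6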
| 23 |
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 664 | 0 |
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
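# --- Invocation sketch (not part of the original script) ---
# Hedged example of running this converter from a shell; the script filename
# and the checkpoint/config paths below are placeholders, not real files.
#
#     python convert_ldm_original.py \
#         --checkpoint_path /path/to/ldm/model.ckpt \
#         --config_path /path/to/ldm/config.yaml \
#         --output_path ./ldm-pipeline
#
# The saved directory can then be reloaded with LDMPipeline.from_pretrained.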
| 24 |
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition model built on a BERT encoder."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
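# --- Shape sketch (not part of the original file) ---
# Hedged illustration of the forward contract: W_query/W_supports are
# tokenizer-style dicts of batched tensors, and W_supports carries the extra
# "sizes"/"start_token_id"/"end_token_id" bookkeeping consumed above. The
# tensors below are random placeholders, not a meaningful example, and the
# default checkpoint is downloaded from the Hub on first use.
def _example_forward_shapes():
    model = FSNERModel()
    W_query = {
        "input_ids": torch.randint(1000, (2, 8)),
        "attention_mask": torch.ones(2, 8, dtype=torch.long),
    }
    W_supports = {
        "input_ids": torch.randint(1000, (4, 8)),
        "attention_mask": torch.ones(4, 8, dtype=torch.long),
        "sizes": torch.tensor([2, 2]),
        "start_token_id": torch.tensor(30522),
        "end_token_id": torch.tensor(30523),
    }
    p_starts, p_ends = model(W_query, W_supports)
    print(p_starts.shape, p_ends.shape)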
| 664 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
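# --- Usage sketch (not part of the original test file) ---
# Hedged standalone example of the default feature extractor: mono audio up to
# 30 s is padded/truncated into a (1, 80, 3000) log-Mel tensor, matching the
# shape asserted in test_integration above.
def _example_extract_features():
    import numpy as np
    from transformers import WhisperFeatureExtractor

    feature_extractor = WhisperFeatureExtractor()
    audio = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
    input_features = feature_extractor(audio, sampling_rate=16000, return_tensors="np").input_features
    print(input_features.shape)  # (1, 80, 3000)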
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
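# --- Migration note (not part of the original shim) ---
# Per the deprecation message above, downstream code should switch to the
# package-root import:
#
#     from diffusers import FlaxStableDiffusionControlNetPipeline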
| 664 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( __lowercase , unittest.TestCase ):
lowercase__: List[Any] = None
lowercase__: Optional[int] = BloomTokenizerFast
lowercase__: Union[str, Any] = BloomTokenizerFast
lowercase__: int = True
lowercase__: Tuple = False
lowercase__: Union[str, Any] = '''tokenizer_file'''
lowercase__: List[str] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encode_decode(self):
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        expected_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(expected_tokens, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(input_sentences, decoded_tokens)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        """The tokenizer should round-trip multilingual XNLI samples."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data sample
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 26 |
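The padding test above hinges on one behavior worth seeing in isolation: a tokenizer whose `pad_token` is unset cannot honor `padding="max_length"` and raises `ValueError`. A small sketch, reusing the same public checkpoint the test downloads:

from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tok.pad_token = None  # simulate the "hotfixed" state exercised in the test
try:
    tok.encode("This is a simple input", max_length=6, padding="max_length")
except ValueError as err:
    print("padding rejected as expected:", err)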
'''simple docstring'''
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with an"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )
# Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"
        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = tax_global_layer_norm
        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm
        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"]["embedding"] = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"]["embedding"] = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"
        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tax_mlp_layer_norm
        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"]["embedding"] = tax_decoder_rel_embedding
    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
__magic_name__ : Optional[Any] =argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 664 | 0 |
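Once the script above has written `flax_dump_folder_path`, the converted checkpoint loads like any other Flax model. A short usage sketch (the local path below is a placeholder for whatever `--flax_dump_folder_path` was given):

from transformers import FlaxAutoModelForSeq2SeqLM

model = FlaxAutoModelForSeq2SeqLM.from_pretrained("./flax_dump_folder_path")
print(type(model).__name__)  # e.g. FlaxLongT5ForConditionalGeneration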
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds tiny MRA configs and inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 27 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 664 | 0 |
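The `torch.manual_seed(0)` calls above are what make those exact-match assertions possible at all: reseeding before each run replays the same random stream, so a non-deterministic model produces identical output. The property in isolation:

import torch

torch.manual_seed(0)
first = torch.randn(3)
torch.manual_seed(0)
second = torch.randn(3)
print(torch.equal(first, second))  # True: same seed, same samples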
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 28 |
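The CLI above is built on argparse sub-parsers: each command class registers its own sub-parser and stores a factory in `args.func`, which `main()` then calls and runs. A minimal, self-contained sketch of that registration pattern with a made-up "hello" command (all names here are illustrative, not part of transformers):

from argparse import ArgumentParser

class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        # each command owns its sub-parser and wires a factory into args.func
        p = subparsers.add_parser("hello")
        p.add_argument("--name", default="world")
        p.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello {self.name}")

parser = ArgumentParser("demo-cli")
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello", "--name", "Ada"])
args.func(args).run()  # prints "hello Ada"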
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list (None if the snippet is too short)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split code on non-alphanumeric characters and drop empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters: MinHash every file, then group near-duplicates via LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 664 | 0 |
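A compact sketch of the MinHash/LSH machinery the script above relies on: two near-duplicate token sets should be retrieved from the index, because their estimated Jaccard similarity exceeds the threshold. (Threshold and `num_perm` here are illustrative; the script uses 0.85 and 256.)

from datasketch import MinHash, MinHashLSH

def minhash_of(tokens, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for t in set(tokens):
        m.update(t.encode())
    return m

doc_a = minhash_of("def add ( a , b ) : return a + b".split())
doc_b = minhash_of("def add ( x , y ) : return x + y".split())  # near-duplicate
lsh = MinHashLSH(threshold=0.5, num_perm=256)
lsh.insert("doc_a", doc_a)
print(lsh.query(doc_b))  # likely ['doc_a']: token-set Jaccard is about 0.67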
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 29 |
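Usage sketch for the processor above, assuming the public checkpoint "laion/clap-htsat-unfused" (an assumption; any CLAP checkpoint should behave the same): text goes through the Roberta tokenizer, raw audio through the feature extractor, and both land in one batch.

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.zeros(48000, dtype=np.float32)  # one second of silence at 48 kHz
batch = processor(text=["a dog barking"], audios=audio, sampling_rate=48000, return_tensors="pt")
print(sorted(batch.keys()))  # attention_mask, input_features, input_ids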
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (GluonNLP) to a PyTorch BERT checkpoint."""
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowerCamelCase_ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do NOT output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
__magic_name__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ : Optional[Any] =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 0 |
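The closing check of the conversion script reduces to a small, reusable pattern for validating any cross-framework port: compare the two outputs elementwise and report the worst-case deviation. A standalone sketch with made-up numbers:

import numpy as np

reference = np.array([0.1, 0.2, 0.3], dtype=np.float32)       # e.g. the gluon output
candidate = np.array([0.1001, 0.2, 0.2999], dtype=np.float32)  # e.g. the torch output
max_abs_diff = np.max(np.abs(candidate - reference)).item()
print(np.allclose(reference, candidate, atol=1e-3), max_abs_diff)  # True, ~1e-4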
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` lines.

    >>> print(multiplication_table(3, 2))
    3 * 1 = 3
    3 * 2 = 6
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10)) | 30 |
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask

            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['article', 'highlights'],
        )
        train_dataset.set_format(
            type='torch',
            columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['article', 'highlights'],
        )
        val_dataset.set_format(
            type='torch',
            columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy='steps',
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train() | 31 |
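The key preprocessing trick in the test above is replacing pad positions in the labels with -100, the `ignore_index` of PyTorch's cross-entropy loss, so that padding never contributes to the training signal. In isolation:

pad_token_id = 0  # illustrative value; the real id comes from the tokenizer
labels = [[101, 7592, 2088, 102, 0, 0]]
masked = [[-100 if token == pad_token_id else token for token in row] for row in labels]
print(masked)  # [[101, 7592, 2088, 102, -100, -100]]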
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """Honor the TRANSFORMERS_VERBOSITY env var, falling back to the default level."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
                f'has to be one of: { ", ".join(log_levels.keys()) }'
            )
    return _default_log_level


def _get_library_name():
    return __name__.split(".")[0]


def _get_library_root_logger():
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger():
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger():
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity():
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler():
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation():
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format():
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm that does nothing."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled():
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 664 | 0 |
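Typical client-side use of the logging utilities defined above: fetch a namespaced logger once, then adjust the library-wide verbosity through the module-level setters.

from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers")
logger.info("INFO-level messages are now visible")
logging.set_verbosity_error()  # silence everything below ERROR again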
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection'
        )
        examples = [
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'candidate_labels': ['cat', 'remote', 'couch'],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection'
        )
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png',
            candidate_labels=['cat', 'remote', 'couch'],
            threshold=0.64,
        )
self.assertEqual(
nested_simplify(outputs, decimals=4), [
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
_UpperCAmelCase = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = pipeline('''zero-shot-object-detection''' )
_UpperCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
_UpperCAmelCase = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase( self ):
pass
@require_torch
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = 0.2
_UpperCAmelCase = pipeline('''zero-shot-object-detection''' )
_UpperCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = 2
_UpperCAmelCase = pipeline('''zero-shot-object-detection''' )
_UpperCAmelCase = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , ) | 32 |
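# For reference, a minimal sketch of the public API the tests above exercise,
# outside the test harness. The checkpoint name is the usual OWL-ViT model for
# this task, but treat the exact name and scores as assumptions.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.1,
)
for pred in predictions:
    print(pred["label"], round(pred["score"], 4), pred["box"])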
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
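# Why the indirection above: `_LazyModule` defers the torch-heavy submodule import
# until an attribute is first accessed. A simplified sketch of the idea follows;
# the real implementation lives in transformers.utils and handles more cases.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)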
| 664 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = CLIPTokenizer
__lowercase : str = CLIPTokenizerFast
__lowercase : Dict = True
__lowercase : Any = {}
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().setUp()
# fmt: off
snake_case__ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
snake_case__ = dict(zip(_a , range(len(_a ) ) ) )
snake_case__ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
snake_case__ = {'''unk_token''': '''<unk>'''}
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , **_a:Optional[int] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , **_a:Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[str] ):
snake_case__ = '''lower newer'''
snake_case__ = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ = '''lower newer'''
snake_case__ = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
snake_case__ = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
snake_case__ = tokens + [tokenizer.unk_token]
snake_case__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ = self.tokenizer_class.from_pretrained(_a , **_a )
snake_case__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
snake_case__ = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case__ = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
snake_case__ = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
snake_case__ = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ = F"""{text_of_1_token} {text_of_1_token}"""
snake_case__ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
snake_case__ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
snake_case__ = F""" {text}"""
snake_case__ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
snake_case__ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def SCREAMING_SNAKE_CASE__ ( self:str ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def SCREAMING_SNAKE_CASE__ ( self:int ):
super().test_tokenization_python_rust_equals()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
# CLIP always lower cases letters
pass
| 33 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCamelCase_ , )
assert hasattr(self , '''env''')
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
UpperCamelCase = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
UpperCamelCase = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCamelCase_ , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version='''py36''' , )
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[Any]:
TrainingJobAnalytics(lowerCamelCase_).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv')
@parameterized.expand([(2,)])
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[str]:
# create estimator
UpperCamelCase = self.create_estimator(lowerCamelCase_)
# run training
estimator.fit()
# result dataframe
UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' , '''w''') as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCamelCase_) | 34 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] (NCHW) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
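# Quick usage sketch for the converters above (assumes torch is installed; shapes
# follow the diffusers NCHW convention with values in [-1, 1]):
if __name__ == "__main__":
    import torch

    batch = torch.rand(2, 3, 64, 64) * 2 - 1  # two RGB images in [-1, 1]
    pil_images = pt_to_pil(batch)
    print(len(pil_images), pil_images[0].size)  # 2 (64, 64)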
| 664 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
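# Usage sketch: encoding one example where a language carries several
# translations, matching encode_example above (tuples are sorted by language
# code, then text):
feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}))
# {'language': ('de', 'en', 'fr', 'fr'),
#  'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}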
| 35 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(
A , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __A ( self : Any , _lowerCamelCase : GenericTensor ) -> np.ndarray:
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase )
else:
raise ValueError("Unsupported framework" )
return masked_index
def __A ( self : str , _lowerCamelCase : GenericTensor ) -> np.ndarray:
__magic_name__ = self.get_masked_index(_lowerCamelCase )
__magic_name__ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __A ( self : int , _lowerCamelCase : GenericTensor ) -> Any:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCamelCase )
def __A ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] ) -> Dict[str, GenericTensor]:
if return_tensors is None:
__magic_name__ = self.framework
__magic_name__ = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase )
self.ensure_exactly_one_mask_token(_lowerCamelCase )
return model_inputs
def __A ( self : List[str] , _lowerCamelCase : int ) -> List[Any]:
__magic_name__ = self.model(**_lowerCamelCase )
__magic_name__ = model_inputs["input_ids"]
return model_outputs
def __A ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Dict=None ) -> Dict:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__magic_name__ = target_ids.shape[0]
__magic_name__ = model_outputs["input_ids"][0]
__magic_name__ = model_outputs["logits"]
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__magic_name__ = outputs.numpy()
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = stable_softmax(_lowerCamelCase , axis=-1 )
if target_ids is not None:
__magic_name__ = tf.gather_nd(tf.squeeze(_lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__magic_name__ = tf.expand_dims(_lowerCamelCase , 0 )
__magic_name__ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase )
__magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy()
else:
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = logits.softmax(dim=-1 )
if target_ids is not None:
__magic_name__ = probs[..., target_ids]
__magic_name__ , __magic_name__ = probs.topk(_lowerCamelCase )
__magic_name__ = []
__magic_name__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__magic_name__ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__magic_name__ = input_ids.numpy().copy()
if target_ids is not None:
__magic_name__ = target_ids[p].tolist()
__magic_name__ = p
# Filter padding out:
__magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__magic_name__ = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__magic_name__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_lowerCamelCase )
result.append(_lowerCamelCase )
if single_mask:
return result[0]
return result
def __A ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ) -> List[str]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = [targets]
try:
__magic_name__ = self.tokenizer.get_vocab()
except Exception:
__magic_name__ = {}
__magic_name__ = []
for target in targets:
__magic_name__ = vocab.get(_lowerCamelCase , _lowerCamelCase )
if id_ is None:
__magic_name__ = self.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , max_length=1 , truncation=_lowerCamelCase , )["input_ids"]
if len(_lowerCamelCase ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"We cannot replace it with anything meaningful, ignoring it" )
continue
__magic_name__ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__magic_name__ = list(set(_lowerCamelCase ) )
if len(_lowerCamelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
__magic_name__ = np.array(_lowerCamelCase )
return target_ids
def __A ( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : int=None ) -> Tuple:
__magic_name__ = {}
if targets is not None:
__magic_name__ = self.get_target_ids(_lowerCamelCase , _lowerCamelCase )
__magic_name__ = target_ids
if top_k is not None:
__magic_name__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : int , _lowerCamelCase : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]:
__magic_name__ = super().__call__(_lowerCamelCase , **_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1:
return outputs[0]
return outputs
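# End-to-end usage sketch of this pipeline through the public API (the checkpoint
# name is an example; any fill-mask model works):
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
for pred in unmasker("Paris is the [MASK] of France.", top_k=3):
    print(pred["token_str"], round(pred["score"], 4))
# Restricting candidates, which is what get_target_ids above implements:
print(unmasker("Paris is the [MASK] of France.", targets=["capital", "heart"]))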
| 664 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : List[str] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = -1
snake_case : Union[str, Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
snake_case : Any = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case : Any = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = -1
snake_case : Optional[int] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : Any = tokenizer.decode(greedy_ids[0] )
snake_case : List[str] = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
snake_case : List[Any] = Thread(target=model.generate ,kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Any = -1
snake_case : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ )
snake_case : str = greedy_ids[:, input_ids.shape[1] :]
snake_case : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
snake_case : str = TextStreamer(SCREAMING_SNAKE_CASE_ ,skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=10 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
snake_case : str = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
snake_case : Dict = AutoTokenizer.from_pretrained("""distilgpt2""" )
snake_case : Tuple = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = -1
snake_case : Any = torch.ones((1, 5) ,device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
snake_case : List[Any] = TextStreamer(SCREAMING_SNAKE_CASE_ ,skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ ,max_new_tokens=1 ,do_sample=SCREAMING_SNAKE_CASE_ ,streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
snake_case : Dict = cs.out[:-1] # Remove the final "\n"
snake_case : Tuple = tokenizer(SCREAMING_SNAKE_CASE_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = -1
snake_case : int = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ ,timeout=0.0_01 )
snake_case : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
snake_case : List[str] = Thread(target=model.generate ,kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
snake_case : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
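# Reference pattern these tests exercise: generate on a background thread and
# consume decoded text as it is produced (the model name is illustrative):
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
inputs = tok("An increasing sequence: one,", return_tensors="pt")
streamer = TextIteratorStreamer(tok)
Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
for new_text in streamer:
    print(new_text, end="")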
| 36 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of k consecutive elements, via a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
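# Worked example of the window update: for [1, 9, -1, -2, 7, 3, -1, 2] and k = 4
# the window sums are 7, 13, 7, 7, 11, so the maximum is 13.
assert max_sum_in_array([1, 9, -1, -2, 7, 3, -1, 2], 4) == 13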
| 664 | 0 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) row by row with Pascal's rule, using O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
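# Sanity check for the implementation above: C(10, 5) = 252, which should agree
# with the closed form n! / (r! (n - r)!) from the standard library.
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252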
| 37 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
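# Usage sketch of the validation above (values are illustrative):
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
    try:
        LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})  # rejected
    except ValueError as err:
        print(err)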
| 664 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyVaaPipeline
lowerCamelCase__ = [
'''image_embeds''',
'''negative_image_embeds''',
]
lowerCamelCase__ = ['''image_embeds''', '''negative_image_embeds''']
lowerCamelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase__ = False
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return 3_2
@property
def __UpperCamelCase ( self ):
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
return 1_0_0
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case__ : Dict = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def __UpperCamelCase ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.dummy_unet
snake_case__ : Union[str, Any] = self.dummy_movq
snake_case__ : List[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__SCREAMING_SNAKE_CASE , )
snake_case__ : Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ):
snake_case__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
snake_case__ : int = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
snake_case__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __UpperCamelCase ( self ):
snake_case__ : str = """cpu"""
snake_case__ : Dict = self.get_dummy_components()
snake_case__ : int = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
snake_case__ : int = output.images
snake_case__ : Dict = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0]
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Tuple = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
snake_case__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
snake_case__ : int = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
snake_case__ : str = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
snake_case__ : List[str] = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : int = """red cat, 4k photo"""
snake_case__ : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ , snake_case__ : Optional[Any] = pipe_prior(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case__ : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
snake_case__ : Optional[int] = pipeline(
image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , output_type="""np""" , )
snake_case__ : Optional[int] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 38 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law, solved for pressure: p = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law, solved for volume: V = nRT / p."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
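# Worked example: 1 mol of an ideal gas at 300 K in a 1 m^3 vessel exerts
# p = nRT / V = 1 * 8.314462 * 300 / 1 ≈ 2494.34 Pa.
print(f"{pressure_of_gas_system(1, 300, 1):.2f} Pa")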
| 664 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 39 |
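# Usage sketch on the graphs above: graph_fwd holds the forward edges, graph_bwd
# the reversed edges used by the backward search. E -> G -> F costs 2 + 1 = 3,
# beating E -> B -> C -> D -> F at cost 4.
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3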
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__magic_name__ : List[Any] =logging.getLogger(__name__)
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : str=-1 ) -> List[str]:
# in NER datasets, the last column is usually reserved for NER label
__magic_name__ = label_idx
def __A ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = mode.value
__magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
__magic_name__ = 1
__magic_name__ = []
with open(_lowerCamelCase , encoding="utf-8" ) as f:
__magic_name__ = []
__magic_name__ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
__magic_name__ = []
__magic_name__ = []
else:
__magic_name__ = line.split(" " )
words.append(splits[0] )
if len(_lowerCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
return examples
def __A ( self : Optional[Any] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Union[str, Any]:
__magic_name__ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(_lowerCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__magic_name__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(_lowerCamelCase )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __A ( self : Tuple , _lowerCamelCase : str ) -> List[str]:
if path:
with open(_lowerCamelCase , "r" ) as f:
__magic_name__ = f.read().splitlines()
if "O" not in labels:
__magic_name__ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __init__( self : int ) -> str:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def __A ( self : int , _lowerCamelCase : str ) -> List[str]:
if path:
with open(_lowerCamelCase , "r" ) as f:
__magic_name__ = f.read().splitlines()
if "O" not in labels:
__magic_name__ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __A ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = mode.value
__magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
__magic_name__ = 1
__magic_name__ = []
with open(_lowerCamelCase , encoding="utf-8" ) as f:
for sentence in parse_incr(_lowerCamelCase ):
__magic_name__ = []
__magic_name__ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
return examples
def __A ( self : Optional[int] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Any:
__magic_name__ = 0
for sentence in parse_incr(_lowerCamelCase ):
__magic_name__ = preds_list[example_id]
__magic_name__ = ""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(_lowerCamelCase )
example_id += 1
def __A ( self : Dict , _lowerCamelCase : str ) -> List[str]:
if path:
with open(_lowerCamelCase , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 664 | 0 |
import requests
giphy_api_key = "YOUR API KEY"
# Can be fetched from https://developers.giphy.com/dashboard/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of gif URLs for a given query from the Giphy search API."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
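# A slightly more defensive variant (illustrative, not in the original): let
# `requests` handle URL encoding and fail loudly on HTTP errors.
def get_gifs_safe(query: str, api_key: str = giphy_api_key, limit: int = 10) -> list:
    response = requests.get(
        "https://api.giphy.com/v1/gifs/search",
        params={"q": query, "api_key": api_key, "limit": limit},
        timeout=10,
    )
    response.raise_for_status()
    return [gif["url"] for gif in response.json()["data"]]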
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 40 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0 ) -> None:
__magic_name__ , __magic_name__ = row, column
__magic_name__ = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )]
def __str__( self : Optional[Any] ) -> str:
__magic_name__ = f'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
__magic_name__ = 0
for row_vector in self.array:
for obj in row_vector:
__magic_name__ = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) )
__magic_name__ = f'%{max_element_length}s'
# Make string and return
def single_line(_lowerCamelCase : list[float] ) -> str:
nonlocal string_format_identifier
__magic_name__ = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
return str(self )
def __A ( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ) -> bool:
if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Optional[int] , _lowerCamelCase : tuple[int, int] ) -> Any:
assert self.validate_indicies(_lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Tuple , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : float ) -> None:
assert self.validate_indicies(_lowerCamelCase )
__magic_name__ = value
def __add__( self : Union[str, Any] , _lowerCamelCase : Matrix ) -> Matrix:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = -self[r, c]
return result
def __sub__( self : Optional[int] , _lowerCamelCase : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : Optional[int] , _lowerCamelCase : int | float | Matrix ) -> Matrix:
if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c] * another
return result
elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
__magic_name__ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__magic_name__ = f'Unsupported type given for another ({type(_lowerCamelCase )})'
raise TypeError(_lowerCamelCase )
def __A ( self : Optional[int] ) -> Matrix:
__magic_name__ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c]
return result
def __A ( self : int , _lowerCamelCase : Matrix , _lowerCamelCase : Matrix ) -> Any:
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == self.column == u.row == v.row # self must be square; u, v must have matching row count
assert u.column == v.column == 1 # u, v should be column vector
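# Sherman-Morrison formula: if `self` holds A^(-1), then
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# which is exactly what the code below computes.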
# Calculate
__magic_name__ = v.transpose()
__magic_name__ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def testa( ):
'''simple docstring'''
ainv = Matrix(3 , 3 , 0 )
for i in range(3 ):
ainv[i, i] = 1
print(F'a^(-1) is {ainv}' )
# u, v
u = Matrix(3 , 1 , 0 )
u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
v = Matrix(3 , 1 , 0 )
v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(F'u is {u}' )
print(F'v is {v}' )
print(F'uv^T is {u * v.transpose()}' )
# Sherman Morrison
print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}' )
def __snake_case ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 664 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (PNDMScheduler,)
SCREAMING_SNAKE_CASE : str = (('num_inference_steps', 5_0),)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,**lowercase__ : List[Any] ):
__lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**lowercase__ )
return config
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str]=0 ,**lowercase__ : Tuple ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config(**lowercase__ )
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__lowercase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : int ):
pass
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[Any]=0 ,**lowercase__ : Any ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__lowercase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__lowercase = scheduler_class.from_pretrained(lowercase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residual (must be after setting timesteps)
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : Any ,**lowercase__ : Dict ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**lowercase__ )
__lowercase = scheduler_class(**lowercase__ )
__lowercase = 1_0
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
__lowercase = model(lowercase__ ,lowercase__ )
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__lowercase = model(lowercase__ ,lowercase__ )
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ ,'''set_timesteps''' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ ,'''set_timesteps''' ):
__lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = scheduler.step_prk(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
__lowercase = scheduler.step_plms(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = scheduler.step_plms(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase__ )
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(steps_offset=1 )
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) ,)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowercase__ ,beta_end=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
# an earlier version of set_timesteps() caused an error indexing alphas when the number of inference steps was a power of 3
__lowercase = 2_7
for scheduler_class in self.scheduler_classes:
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaises(lowercase__ ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.full_loop()
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.full_loop(prediction_type='''v_prediction''' )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : int ):
# We specify different beta, so that the first alpha is 0.99
__lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
__lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
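# A minimal, self-contained denoising-loop sketch for PNDMScheduler (an
# illustration, not part of the test suite above; the zero tensor stands in for
# a real noise-prediction UNet output):
def _pndm_loop_sketch():
    import torch
    from diffusers import PNDMScheduler

    scheduler = PNDMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # placeholder for unet(sample, t).sample
        # step() dispatches to step_prk() during the Runge-Kutta warm-up steps
        # and to step_plms() afterwards
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample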
| 41 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__magic_name__ : List[Any] =logging.getLogger(__name__)
__magic_name__ : int ='Hello world! cécé herlolip'
__magic_name__ : List[Any] =namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
'''simple docstring'''
__magic_name__ = BertAbsConfig(
temp_dir="." , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__magic_name__ = torch.load(lowerCamelCase_ , lambda storage , loc : storage )
__magic_name__ = AbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) , lowerCamelCase_ )
original.eval()
__magic_name__ = BertAbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__magic_name__ = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__magic_name__ = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
__magic_name__ = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__magic_name__ = encoder_input_ids
__magic_name__ = decoder_input_ids
__magic_name__ = __magic_name__ = None
__magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__magic_name__ = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = original.generator(lowerCamelCase_ )
__magic_name__ = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = new_model.generator(lowerCamelCase_ )
__magic_name__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if are_identical:
logging.info("all outputs are equal up to 1e-3" )
else:
raise ValueError("the outputs are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
__magic_name__ : Dict =argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
__magic_name__ : Any =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
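# Example invocation (the paths are placeholders and the script filename is
# indicative):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_original_checkpoint.pt \
#       --pytorch_dump_folder_path ./bertabs-finetuned-cnndm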
| 664 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
# Automatically constructed
SCREAMING_SNAKE_CASE_ = "dict"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = field(default='Translation' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self ) -> Optional[Any]:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCamelCase( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# Automatically constructed
SCREAMING_SNAKE_CASE_ = "dict"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = field(default='TranslationVariableLanguages' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ) -> Dict:
'''simple docstring'''
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
lowerCamelCase_ = set(self.languages )
if self.languages and set(SCREAMING_SNAKE_CASE_ ) - lang_set:
raise ValueError(
f'''Some languages in example ({", ".join(sorted(set(SCREAMING_SNAKE_CASE_ ) - lang_set ) )}) are not in valid set ({", ".join(SCREAMING_SNAKE_CASE_ )}).''' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ ,lowerCamelCase_ = zip(*sorted(SCREAMING_SNAKE_CASE_ ) )
return {"language": languages, "translation": translations}
def UpperCamelCase( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
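# A minimal usage sketch for the `Translation` feature defined above (relies on
# the public `datasets` API; the dataset contents are made up for illustration):
def _translation_feature_sketch():
    from datasets import Dataset, Features, Value
    from datasets.features import Translation

    features = Features(
        {"id": Value("string"), "translation": Translation(languages=["en", "fr"])}
    )
    ds = Dataset.from_dict(
        {"id": ["0"], "translation": [{"en": "hello", "fr": "bonjour"}]},
        features=features,
    )
    return ds[0]["translation"]  # {'en': 'hello', 'fr': 'bonjour'}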
| 42 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> str:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self : List[Any] ) -> str:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(_lowerCamelCase ) # fails here
def __A ( self : List[Any] ) -> int:
__magic_name__ = [[1, 2, 3], [1, 2, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(3 )
__magic_name__ = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __A ( self : Any ) -> Union[str, Any]:
__magic_name__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
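# A minimal constrained-generation sketch using DisjunctiveConstraint (an
# illustration based on the public `generate()` API; the model name and the
# forced words are arbitrary examples, not part of the tests above):
def _disjunctive_generation_sketch():
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.generation import DisjunctiveConstraint

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    # the generated text must contain one of these two token sequences
    word_ids = [
        tokenizer(" screams", add_special_tokens=False).input_ids,
        tokenizer(" screaming", add_special_tokens=False).input_ids,
    ]
    inputs = tokenizer("The man", return_tensors="pt")
    out = model.generate(
        **inputs,
        constraints=[DisjunctiveConstraint(word_ids)],
        num_beams=4,  # constrained decoding requires beam search
        max_new_tokens=20,
    )
    return tokenizer.decode(out[0], skip_special_tokens=True)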
| 664 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
lowerCAmelCase = 'bart'
lowerCAmelCase = True
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def _a ( ):
"""simple docstring"""
if LOAD_DENSE_INDEX:
lowercase__ = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
lowercase__ = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
lowercase__ = qar_model.eval()
else:
lowercase__ , lowercase__ = (None, None)
if MODEL_TYPE == "bart":
lowercase__ = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
lowercase__ = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
lowercase__ = sas_model.eval()
else:
lowercase__ , lowercase__ = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def _a ( ):
"""simple docstring"""
if LOAD_DENSE_INDEX:
lowercase__ = faiss.StandardGpuResources()
lowercase__ = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
lowercase__ = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 1_28) , )
lowercase__ = faiss.IndexFlatIP(1_28 )
lowercase__ = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE , 1 , SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
lowercase__ , lowercase__ = (None, None)
lowercase__ = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def _a ( ):
"""simple docstring"""
lowercase__ = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
lowercase__ = elia['''train_eli5''']
lowercase__ = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 1_28) )
lowercase__ = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = load_indexes()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = load_models()
lowerCAmelCase, lowerCAmelCase = load_train_data()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=10 ):
"""simple docstring"""
lowercase__ = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = eli5_train_q_index.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = [elia_train[int(SCREAMING_SNAKE_CASE )] for i in I[0]]
return nn_examples
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="wiki40b" , SCREAMING_SNAKE_CASE="dense" , SCREAMING_SNAKE_CASE=10 ):
"""simple docstring"""
if source == "none":
lowercase__ , lowercase__ = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowercase__ , lowercase__ = query_qa_dense_index(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
lowercase__ , lowercase__ = query_es_index(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index_name='''english_wiki40b_snippets_100w''' , n_results=SCREAMING_SNAKE_CASE , )
lowercase__ = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
lowercase__ = '''question: {} context: {}'''.format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE : None),
} )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2_56 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.95 , SCREAMING_SNAKE_CASE=0.8 ):
"""simple docstring"""
with torch.no_grad():
lowercase__ = qa_sas_generate(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , temp=SCREAMING_SNAKE_CASE , top_p=SCREAMING_SNAKE_CASE , top_k=SCREAMING_SNAKE_CASE , max_input_length=10_24 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
lowerCAmelCase = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
lowerCAmelCase = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowerCAmelCase = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
lowerCAmelCase = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
lowerCAmelCase = st.sidebar.checkbox('Demo options')
if demo_options:
lowerCAmelCase = st.sidebar.selectbox(
'',
action_list,
index=3,
)
lowerCAmelCase = action_list.index(action_st)
lowerCAmelCase = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
lowerCAmelCase = show_type == 'Show full text of passages'
else:
lowerCAmelCase = 3
lowerCAmelCase = True
lowerCAmelCase = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
lowerCAmelCase = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
lowerCAmelCase = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
lowerCAmelCase = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
lowerCAmelCase = 'wiki40b'
lowerCAmelCase = 'dense'
lowerCAmelCase = 'beam'
lowerCAmelCase = 2
lowerCAmelCase = 64
lowerCAmelCase = 256
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = st.sidebar.checkbox('Generation options')
if generate_options:
lowerCAmelCase = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
lowerCAmelCase = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
lowerCAmelCase = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
lowerCAmelCase = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
lowerCAmelCase = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCAmelCase = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
lowerCAmelCase = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
lowerCAmelCase = None
# start main text
lowerCAmelCase = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
lowerCAmelCase = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
lowerCAmelCase = st.text_input('Enter your question here:', '')
else:
lowerCAmelCase = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
lowerCAmelCase, lowerCAmelCase = make_support(question, source=wiki_source, method='dense', n_results=10)
lowerCAmelCase, lowerCAmelCase = make_support(question, source=wiki_source, method='sparse', n_results=10)
lowerCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowerCAmelCase = support_list[:10]
lowerCAmelCase = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
lowerCAmelCase, lowerCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
lowerCAmelCase, lowerCAmelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
lowerCAmelCase = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
lowerCAmelCase = res[1].strip()
if sec_titles == "":
lowerCAmelCase = '[{}]({})'.format(res[0], wiki_url)
else:
lowerCAmelCase = sec_titles.split(' & ')
lowerCAmelCase = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
lowerCAmelCase = find_nearest_training(question)
lowerCAmelCase = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
lowerCAmelCase = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
lowerCAmelCase = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
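# To launch this demo locally (standard Streamlit invocation; the script
# filename here is a placeholder): streamlit run eli5_app.py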
| 43 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__magic_name__ : Dict ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Union[str, Any]:
__magic_name__ = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def __A ( cls : Any ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def __A ( self : Optional[Any] ) -> Dict:
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase , repo_id="test-config" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : str ) -> Optional[int]:
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : Optional[int] ) -> Union[str, Any]:
CustomConfig.register_for_auto_class()
__magic_name__ = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
__magic_name__ = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ) -> Optional[Any]:
__magic_name__ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__magic_name__ = c.n_embd + 1 # int
__magic_name__ = c.resid_pdrop + 1.0 # float
__magic_name__ = not c.scale_attn_weights # bool
__magic_name__ = c.summary_type + "foo" # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(_lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(_lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(_lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
def __A ( self : List[Any] ) -> Union[str, Any]:
__magic_name__ = PretrainedConfig()
__magic_name__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
__magic_name__ = [key for key, value in config_common_kwargs.items() if value == getattr(_lowerCamelCase , _lowerCamelCase )]
if len(_lowerCamelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f' {", ".join(_lowerCamelCase )}.' )
def __A ( self : List[Any] ) -> List[Any]:
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(_lowerCamelCase )
def __A ( self : Tuple ) -> int:
# A mock response for an HTTP head request to emulate server down
__magic_name__ = mock.Mock()
__magic_name__ = 5_00
__magic_name__ = {}
__magic_name__ = HTTPError
__magic_name__ = {}
# Download this model to make sure it's in the cache.
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This checks that we did call the fake head request
mock_head.assert_called()
def __A ( self : Union[str, Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
__magic_name__ = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def __A ( self : Dict ) -> Optional[int]:
__magic_name__ = AutoConfig.from_pretrained("bert-base-cased" )
__magic_name__ = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_lowerCamelCase )
__magic_name__ = 2
json.dump(configuration.to_dict() , open(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__magic_name__ = ["config.42.0.0.json"]
__magic_name__ = 7_68
configuration.save_pretrained(_lowerCamelCase )
shutil.move(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , os.path.join(_lowerCamelCase , "config.42.0.0.json" ) )
__magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def __A ( self : Optional[int] ) -> str:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__magic_name__ = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
__magic_name__ = "v4.0.0"
__magic_name__ , __magic_name__ = new_transformers.models.auto.AutoConfig.from_pretrained(
_lowerCamelCase , return_unused_kwargs=_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_lowerCamelCase , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__magic_name__ = "v3.0.0"
__magic_name__ = old_transformers.models.auto.AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(old_configuration.hidden_size , 7_68 )
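# A minimal save/reload round-trip sketch (an illustration, independent of the
# Hub-staging tests above):
def _config_roundtrip_sketch():
    import tempfile
    from transformers import BertConfig

    config = BertConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        reloaded = BertConfig.from_pretrained(tmp_dir)
    return reloaded.hidden_size  # 32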
| 664 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A ):
def __init__( self : Tuple,__A : Optional[Any],__A : int ):
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowerCamelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__A,scheduler=__A )
@torch.no_grad()
def __call__( self : Tuple,__A : int = 1,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : float = 0.0,__A : int = 5_0,__A : Optional[bool] = None,__A : Optional[str] = "pil",__A : bool = True,):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size,__A ):
_lowerCamelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowerCamelCase : int = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__A,__A ) and len(__A ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(__A )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_lowerCamelCase : Tuple = randn_tensor(__A,generator=__A,device=self.device,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__A )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCamelCase : Any = self.unet(__A,__A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the paper and should be in [0, 1]
# do x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(
__A,__A,__A,eta=__A,use_clipped_model_output=__A,generator=__A ).prev_sample
_lowerCamelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0,1 )
_lowerCamelCase : Tuple = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
_lowerCamelCase : Any = self.numpy_to_pil(__A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__A )
| 44 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[Any]=18 , _lowerCamelCase : Union[str, Any]=30 , _lowerCamelCase : Tuple=4_00 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=True , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , ) -> Dict:
__magic_name__ = size if size is not None else {"height": 18, "width": 18}
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = image_size
__magic_name__ = min_resolution
__magic_name__ = max_resolution
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = do_normalize
__magic_name__ = image_mean
__magic_name__ = image_std
def __A ( self : int ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase_ ( A , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = DPTImageProcessor if is_vision_available() else None
def __A ( self : Dict ) -> Any:
__magic_name__ = DPTImageProcessingTester(self )
@property
def __A ( self : str ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> List[str]:
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "size" ) )
def __A ( self : List[str] ) -> List[Any]:
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Dict ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Optional[int] ) -> Dict:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
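# A minimal usage sketch for DPTImageProcessor (an illustration, not part of the
# tests above; the size is an arbitrary example):
def _dpt_processor_sketch():
    import numpy as np
    from transformers import DPTImageProcessor

    processor = DPTImageProcessor(do_resize=True, size={"height": 384, "width": 384})
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # HWC uint8
    pixel_values = processor(image, return_tensors="pt").pixel_values
    return pixel_values.shape  # torch.Size([1, 3, 384, 384])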
| 664 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
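# --- Illustrative usage sketch (hedged; this test module uses relative imports,
# so the lines below belong in user code). Plain inference with the same
# "facebook/convnext-tiny-224" checkpoint the integration test exercises:
#
#   import torch
#   from PIL import Image
#   from transformers import AutoImageProcessor, ConvNextForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
#   model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
#   image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")
#   with torch.no_grad():
#       logits = model(**processor(images=image, return_tensors="pt")).logits
#   print(model.config.id2label[int(logits.argmax(-1).item())])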
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent the number of nodes in
        # the input layer. The first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # The first hidden layer has 4 nodes; the second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # The second hidden layer has 3 nodes; the output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # predicted_output initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer connects the input nodes
        # with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # Expects the sigmoid *activation* s and returns s * (1 - s).
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see the loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
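# --- Illustrative check (hedged sketch, not part of the original module):
# back_propagation relies on the identity sigmoid'(x) = s * (1 - s) where
# s = sigmoid(x), so sigmoid_derivative expects the *activation* s rather than
# the raw pre-activation x. A finite-difference probe makes that convention explicit.
if __name__ == "__main__":
    xs = numpy.linspace(-3, 3, 7)
    eps = 1e-6
    numeric = (sigmoid(xs + eps) - sigmoid(xs - eps)) / (2 * eps)
    analytic = sigmoid_derivative(sigmoid(xs))
    assert numpy.allclose(numeric, analytic, atol=1e-8)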
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
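# --- Illustrative follow-up (hedged sketch): a checkpoint written by this script
# loads like any other ViT model. "facebook/dino-vitb16" is one of the published
# conversions; pointing from_pretrained at a local pytorch_dump_folder_path works
# the same way.
#
#   from transformers import ViTImageProcessor, ViTModel
#
#   processor = ViTImageProcessor.from_pretrained("facebook/dino-vitb16")
#   model = ViTModel.from_pretrained("facebook/dino-vitb16", add_pooling_layer=False)
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   cls_embedding = model(**inputs).last_hidden_state[:, 0, :]  # DINO [CLS] feature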
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        """Find the score of each query token being the start and end token of an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
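# --- Illustrative consumption sketch (hedged, not from the original project):
# `forward` returns, for each query, a probability distribution over query tokens
# of being an entity start and an entity end. A minimal greedy decoder pairs the
# best start with the best end at or after it; the 0.5 threshold is an assumption
# of this sketch.
def decode_best_span(p_start, p_end, threshold=0.5):
    start_idx = int(torch.argmax(p_start))
    end_idx = start_idx + int(torch.argmax(p_end[start_idx:]))
    if float(p_start[start_idx]) < threshold or float(p_end[end_idx]) < threshold:
        return None
    return start_idx, end_idx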
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
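# --- Illustrative note (hedged): the expected_slice above was frozen once from the
# reference fairseq implementation, following the recipe in the comments, roughly:
#
#   camembert = torch.hub.load("pytorch/fairseq", "camembert.v0")
#   camembert.eval()
#   expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()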
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
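# --- Illustrative migration sketch (hedged): the import path the deprecation
# message recommends. The checkpoint arguments are placeholders, not part of
# this shim.
#
#   from diffusers import FlaxStableDiffusionControlNetPipeline
#
#   pipeline, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
#       ...,  # a Stable Diffusion checkpoint id
#       controlnet=...,  # a FlaxControlNetModel
#   )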
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
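# --- Illustrative usage sketch (hedged; this module uses relative imports, so the
# lines below would live in user code, not here). The checkpoint id is the
# mBART-50 model already referenced in this file's constants.
#
#   from transformers import MBart50Tokenizer
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   enc = tokenizer("UN Chief says there is no military solution in Syria")
#   # set_src_lang_special_tokens puts the en_XX language code first and </s> last
#   print(enc["input_ids"][0], enc["input_ids"][-1])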
'''simple docstring'''
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
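# --- Illustrative invocation (hedged): a typical run of the converter above.
# The checkpoint path is a placeholder; config names such as "google/t5-v1_1-small"
# follow the usual Hub naming.
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path ./t5-v1_1-small-flax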
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase

logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
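# --- Illustrative sketch (hedged; this module uses relative imports, so the lines
# below belong in user code). What the generate_dummy_inputs override produces:
#
#   from transformers import AutoTokenizer, LongformerConfig
#   from transformers.models.longformer.configuration_longformer import LongformerOnnxConfig
#   from transformers.utils import TensorType
#
#   tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
#   onnx_config = LongformerOnnxConfig(LongformerConfig())
#   dummy = onnx_config.generate_dummy_inputs(preprocessor=tokenizer, framework=TensorType.PYTORCH)
#   # dummy["global_attention_mask"] is all zeros with every second position set to 1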
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
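# --- Illustrative usage (hedged sketch): the same tool outside the test harness.
# Requires the agents extra of `transformers` plus model downloads on first use.
#
#   from transformers import load_tool
#
#   tts = load_tool("text-to-speech")
#   tts.setup()
#   audio = tts("hey").to_raw()  # 1-D waveform tensor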
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
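# --- Illustrative cross-check (hedged sketch): for a small limit, the streamed
# accumulation in solution() can be compared against a direct brute force over
# consecutive prime pairs (p, q) with p * p <= limit, summing numbers strictly
# between p^2 and q^2 that are divisible by exactly one of p and q.
if __name__ == "__main__":
    small_limit = 1_000
    brute = 0
    small_primes = prime_sieve(math.floor(math.sqrt(small_limit)) + 100)
    for p, q in zip(small_primes, small_primes[1:]):
        if p * p > small_limit:
            break
        for n in range(p * p + 1, q * q):
            if n > small_limit:
                break
            if (n % p == 0) != (n % q == 0):
                brute += n
    print(brute, solution(small_limit))  # the two values should agree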
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
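# --- Illustrative usage sketch (hedged): the pipeline above on a tiny in-memory
# dataset. Field names mirror what the functions expect ("content", "repo_name",
# "path"); contents are padded so they clear the MIN_NUM_TOKENS threshold.
if __name__ == "__main__":
    toy = Dataset.from_dict(
        {
            "content": ["def add(a, b):\n    return a + b\n" * 5] * 3 + ["print('hello world')\n" * 5],
            "repo_name": ["r1", "r2", "r3", "r4"],
            "path": ["a.py", "b.py", "c.py", "d.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(filtered), clusters)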
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 51 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__magic_name__ : Optional[int] =logging.get_logger(__name__)
__magic_name__ : Tuple ='The Nymphenburg Palace is a beautiful palace in Munich!'
def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str ):
'''simple docstring'''
__magic_name__ = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
__magic_name__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__magic_name__ = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCamelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__magic_name__ = os.path.join(get_home_dir() , "models" )
    __magic_name__ = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=Vocab )  # the Vocab class imported above
__magic_name__ = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
__magic_name__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
__magic_name__ = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowerCamelCase_ ),
}
__magic_name__ = BertConfig.from_dict(lowerCamelCase_ )
__magic_name__ = BertForMaskedLM(lowerCamelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array : Any ) -> nn.Parameter:  # parameter renamed to match the body below
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):  # hf_param: HF-side tensor; gluon_param: key into the Gluon params dict
__magic_name__ = hf_param.shape
__magic_name__ = to_torch(params[gluon_param] )
__magic_name__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__magic_name__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__magic_name__ = hf_bort_model.bert.encoder.layer[i]
# self attention
__magic_name__ = layer.attention.self
__magic_name__ = check_and_map_params(
self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__magic_name__ = check_and_map_params(
self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__magic_name__ = check_and_map_params(
self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__magic_name__ = check_and_map_params(
self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__magic_name__ = check_and_map_params(
self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__magic_name__ = check_and_map_params(
self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__magic_name__ = layer.attention.output
__magic_name__ = check_and_map_params(
self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
__magic_name__ = check_and_map_params(
self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__magic_name__ = layer.intermediate
__magic_name__ = check_and_map_params(
intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__magic_name__ = check_and_map_params(
intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__magic_name__ = layer.output
__magic_name__ = check_and_map_params(
bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__magic_name__ = check_and_map_params(
bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__magic_name__ = RobertaTokenizer.from_pretrained("roberta-base" )
__magic_name__ = tokenizer.encode_plus(lowerCamelCase_ )["input_ids"]
# Get gluon output
__magic_name__ = mx.nd.array([input_ids] )
__magic_name__ = original_bort(inputs=lowerCamelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase_ )
__magic_name__ = BertModel.from_pretrained(lowerCamelCase_ )
hf_bort_model.eval()
__magic_name__ = tokenizer.encode_plus(lowerCamelCase_ , return_tensors="pt" )
__magic_name__ = hf_bort_model(**lowerCamelCase_ )[0]
__magic_name__ = output_gluon[0].asnumpy()
__magic_name__ = output_hf[0].detach().numpy()
__magic_name__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__magic_name__ = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , lowerCamelCase_ )
if __name__ == "__main__":
__magic_name__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ : Optional[Any] =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
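    # Example invocation (editor's sketch; script and file names are placeholders):
    #   python convert_bort_checkpoint_to_pytorch.py \
    #       --bort_checkpoint_path ./bort_4_8_768_1024.params \
    #       --pytorch_dump_folder_path ./bort-pytorch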
| 664 | 0 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
A = '''sshleifer/mar_enro_6_3_student'''
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_UpperCAmelCase , )
__a : int = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
MarianMTModel.from_pretrained(_UpperCAmelCase )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
__a : List[Any] = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
__a : List[Any] = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
__a : Dict = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
            __a : Dict = bash_script.replace(k , str(v ) )
__a : Dict = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__a : int = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__a : Optional[int] = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(sys , '''argv''' , testargs ):
__a : str = argparse.ArgumentParser()
__a : int = pl.Trainer.add_argparse_args(_UpperCAmelCase )
__a : Optional[Any] = SummarizationModule.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__a : Optional[int] = parser.parse_args()
__a : Union[str, Any] = main(_UpperCAmelCase )
# Check metrics
__a : int = load_json(model.metrics_save_path )
__a : Tuple = metrics['''val'''][0]
__a : Union[str, Any] = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
            assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , float )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.0_1 )
            # generation should stay fast: a val_avg_gen_time above 1s suggests the model hangs on generate (e.g. a bad saved config)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__a : Optional[int] = os.listdir(_UpperCAmelCase )
__a : Optional[int] = [x for x in contents if x.endswith('''.ckpt''' )][0]
__a : Tuple = os.path.join(args.output_dir , _UpperCAmelCase )
__a : int = torch.load(_UpperCAmelCase , map_location='''cpu''' )
__a : Optional[Any] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
                __a : Optional[int] = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
__a : Any = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
__a : List[Any] = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
__a : Union[str, Any] = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
__a : str = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
__a : Optional[Any] = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
            __a : List[str] = bash_script.replace(k , str(v ) )
__a : str = self.get_auto_remove_tmp_dir()
__a : List[Any] = bash_script.replace('''--fp16''' , '''''' )
__a : str = 6
__a : Dict = (
['''distillation.py''']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
        with patch.object(sys , '''argv''' , testargs ):
__a : str = argparse.ArgumentParser()
__a : Tuple = pl.Trainer.add_argparse_args(_UpperCAmelCase )
__a : str = SummarizationDistiller.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__a : Any = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__a : List[Any] = distill_main(_UpperCAmelCase )
# Check metrics
__a : Union[str, Any] = load_json(model.metrics_save_path )
__a : Union[str, Any] = metrics['''val'''][0]
__a : Tuple = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
__a : Optional[Any] = os.listdir(_UpperCAmelCase )
__a : Dict = [x for x in contents if x.endswith('''.ckpt''' )][0]
__a : str = os.path.join(args.output_dir , _UpperCAmelCase )
__a : Optional[int] = torch.load(_UpperCAmelCase , map_location='''cpu''' )
__a : int = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            __a : int = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['''test'''] ) == 1
| 52 |
'''simple docstring'''
def binary_and ( a : int , b : int ):
    '''Return the bitwise AND of two non-negative integers as a binary string.'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:] # remove the leading "0b"
    b_binary = str(bin(b ) )[2:] # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
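# Worked examples (editor's sketch):
#   binary_and(5, 3)   -> "0b001"      since 0b101 & 0b011 == 0b001
#   binary_and(25, 32) -> "0b000000"   no shared set bits; zero-padded to the longer operand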
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664 | 0 |
from sklearn.metrics import recall_score
import datasets
_snake_case : Dict = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_snake_case : Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_snake_case : str = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowercase ( self : Optional[Any] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] , )
def lowercase ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]="binary" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str="warn" , ) -> int:
__lowerCAmelCase = recall_score(
lowerCAmelCase_ , lowerCAmelCase_ , labels=lowerCAmelCase_ , pos_label=lowerCAmelCase_ , average=lowerCAmelCase_ , sample_weight=lowerCAmelCase_ , zero_division=lowerCAmelCase_ , )
return {"recall": float(lowerCAmelCase_ ) if score.size == 1 else score}
| 53 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__magic_name__ : Tuple =threading.Lock()
__magic_name__ : Optional[logging.Handler] =None
__magic_name__ : List[str] ={
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
__magic_name__ : str =logging.WARNING
__magic_name__ : Any =True
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = os.getenv("TRANSFORMERS_VERBOSITY" , lowerCamelCase_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
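# Example (editor's note): set the variable in the environment before launching, e.g.
#   TRANSFORMERS_VERBOSITY=error python train.py
# Unknown values trigger the warning above and fall back to the default level.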
def __snake_case ( ):
'''simple docstring'''
return __name__.split("." )[0]
def __snake_case ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def __snake_case ( ):
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
__magic_name__ = logging.StreamHandler() # Set sys.stderr as stream.
__magic_name__ = sys.stderr.flush
# Apply our default configuration to the library root logger.
__magic_name__ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
__magic_name__ = False
def __snake_case ( ):
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
__magic_name__ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
__magic_name__ = None
def __snake_case ( ):
'''simple docstring'''
return log_levels
def __snake_case ( lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if name is None:
__magic_name__ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __snake_case ( lowerCamelCase_ : int ):
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(lowerCamelCase_ )
def set_verbosity_info ( ):
    '''Set the library verbosity to the INFO level.'''
    return set_verbosity(INFO )
def set_verbosity_warning ( ):
    '''Set the library verbosity to the WARNING level.'''
    return set_verbosity(WARNING )
def set_verbosity_debug ( ):
    '''Set the library verbosity to the DEBUG level.'''
    return set_verbosity(DEBUG )
def set_verbosity_error ( ):
    '''Set the library verbosity to the ERROR level.'''
    return set_verbosity(ERROR )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __snake_case ( lowerCamelCase_ : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(lowerCamelCase_ )
def __snake_case ( lowerCamelCase_ : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
__magic_name__ = False
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
__magic_name__ = True
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = _get_library_root_logger().handlers
for handler in handlers:
__magic_name__ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(lowerCamelCase_ )
def __snake_case ( self : Union[str, Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : Any ):
'''simple docstring'''
__magic_name__ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , lowerCamelCase_ )
if no_advisory_warnings:
return
self.warning(*lowerCamelCase_ , **lowerCamelCase_ )
__magic_name__ : int =warning_advice
@functools.lru_cache(None )  # unbounded cache: each distinct warning is emitted only once
def __snake_case ( self : Dict , *lowerCamelCase_ : int , **lowerCamelCase_ : int ):
'''simple docstring'''
self.warning(*lowerCamelCase_ , **lowerCamelCase_ )
__magic_name__ : Optional[int] =warning_once
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ) -> Any: # pylint: disable=unused-argument
__magic_name__ = args[0] if args else None
def __iter__( self : int ) -> Tuple:
return iter(self._iterator )
def __getattr__( self : List[Any] , _lowerCamelCase : int ) -> List[Any]:
def empty_fn(*_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ) -> Any:
return self
def __exit__( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ) -> Dict:
return
class UpperCamelCase_ :
"""simple docstring"""
def __call__( self : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> List[Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*_lowerCamelCase , **_lowerCamelCase )
else:
return EmptyTqdm(*_lowerCamelCase , **_lowerCamelCase )
def __A ( self : Optional[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Dict ) -> Union[str, Any]:
__magic_name__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_lowerCamelCase , **_lowerCamelCase )
def __A ( self : str ) -> Any:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__magic_name__ : List[Any] =_tqdm_cls()
def __snake_case ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def __snake_case ( ):
'''simple docstring'''
global _tqdm_active
__magic_name__ = True
hf_hub_utils.enable_progress_bars()
def __snake_case ( ):
'''simple docstring'''
global _tqdm_active
__magic_name__ = False
hf_hub_utils.disable_progress_bars()
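# Usage sketch (editor's addition, using the helpers defined above):
#   set_verbosity(INFO)            # or set_verbosity_info()
#   logger = get_logger(__name__)
#   logger.info("INFO and above now reach stderr via the shared default handler")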
| 664 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__lowercase : Optional[Any] =datasets.logging.get_logger(__name__)
__lowercase : Optional[Any] ="""\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
__lowercase : List[str] ="""\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
__lowercase : Optional[Any] ="""
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
__lowercase : str ={
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: List[Any] ) -> Tuple:
'''simple docstring'''
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." )
UpperCAmelCase_ ="bleurt-base-128"
if self.config_name.lower() in CHECKPOINT_URLS:
UpperCAmelCase_ =self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
UpperCAmelCase_ =self.config_name.upper()
else:
raise KeyError(
F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
# download the model checkpoint specified by self.config_name and set up the scorer
UpperCAmelCase_ =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
UpperCAmelCase_ =score.BleurtScorer(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.scorer.score(references=_lowerCAmelCase , candidates=_lowerCAmelCase )
return {"scores": scores}
| 54 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : Union[str, Any] ={'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str =[
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
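# Editor's note: with the _LazyModule registration above, importing the package is cheap;
# torch-backed symbols such as FocalNetModel are only imported on first attribute access,
# and the try/except guards keep the package importable when torch is not installed.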
| 664 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
# TODO Update this
SCREAMING_SNAKE_CASE :Dict = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "esm"
def __init__( self : List[str] ,A : Dict=None ,A : Tuple=None ,A : Any=None ,A : Optional[Any]=7_68 ,A : Tuple=12 ,A : List[str]=12 ,A : Tuple=30_72 ,A : List[str]=0.1 ,A : List[Any]=0.1 ,A : int=10_26 ,A : List[str]=0.02 ,A : Union[str, Any]=1E-12 ,A : List[Any]="absolute" ,A : List[Any]=True ,A : Union[str, Any]=None ,A : Optional[int]=False ,A : Dict=False ,A : Tuple=None ,A : Optional[int]=None ,**A : List[Any] ,):
super().__init__(pad_token_id=A ,mask_token_id=A ,**A )
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = initializer_range
__A = layer_norm_eps
__A = position_embedding_type
__A = use_cache
__A = emb_layer_norm_before
__A = token_dropout
__A = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
__A = EsmFoldConfig()
elif isinstance(A ,A ):
__A = EsmFoldConfig(**A )
__A = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
__A = get_default_vocab_list()
else:
__A = vocab_list
else:
__A = None
__A = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,A ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def UpperCamelCase_ ( self : Optional[int] ):
__A = super().to_dict()
if isinstance(self.esmfold_config ,A ):
__A = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = None
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 0
snake_case_ = True
snake_case_ = False
snake_case_ = 128
snake_case_ = None
def UpperCamelCase_ ( self : List[Any] ):
if self.trunk is None:
__A = TrunkConfig()
elif isinstance(self.trunk ,A ):
__A = TrunkConfig(**self.trunk )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = asdict(self )
__A = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = 48
snake_case_ = 1024
snake_case_ = 128
snake_case_ = 32
snake_case_ = 32
snake_case_ = 32
snake_case_ = 0
snake_case_ = 0
snake_case_ = False
snake_case_ = 4
snake_case_ = 128
snake_case_ = None
def UpperCamelCase_ ( self : List[Any] ):
if self.structure_module is None:
__A = StructureModuleConfig()
elif isinstance(self.structure_module ,A ):
__A = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
__A = self.sequence_state_dim // self.sequence_head_width
__A = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCamelCase_ ( self : Tuple ):
__A = asdict(self )
__A = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = 384
snake_case_ = 128
snake_case_ = 16
snake_case_ = 128
snake_case_ = 12
snake_case_ = 4
snake_case_ = 8
snake_case_ = 0.1
snake_case_ = 8
snake_case_ = 1
snake_case_ = 2
snake_case_ = 7
snake_case_ = 10
snake_case_ = 1E-8
snake_case_ = 1E5
def UpperCamelCase_ ( self : Union[str, Any] ):
return asdict(self )
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
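# Usage sketch (editor's addition; `EsmConfig` is the assumed public name of the config class above):
#   cfg = EsmConfig(vocab_size=33, is_folding_model=True)  # default EsmFoldConfig/TrunkConfig are filled in
#   d = cfg.to_dict()  # nested dataclass configs are serialized recursively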
| 55 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : Optional[Any] ={
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int =['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict =[
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Tuple =[
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__magic_name__ : int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an arithmetic expression given in reverse Polish (postfix) notation."""
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()  # the right operand sits on top of the stack
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                # "/" is integer division truncating toward zero (C-style),
                # built on top of Python's floor division
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
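# Worked examples (editor's sketch of the postfix semantics implemented above):
#   evaluate_postfix(["2", "3", "4", "*", "+"])             ->  2 + (3 * 4)         ->  14
#   evaluate_postfix(["15", "7", "1", "1", "+", "-", "/"])  ->  15 / (7 - (1 + 1))  ->  3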
if __name__ == "__main__":
import doctest
doctest.testmod()
| 56 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__magic_name__ : str ={
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__magic_name__ : Tuple ={
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
__magic_name__ = (images / 2 + 0.5).clamp(0 , 1 )
__magic_name__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__magic_name__ = numpy_to_pil(lowerCamelCase_ )
return images
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
if images.ndim == 3:
__magic_name__ = images[None, ...]
__magic_name__ = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
__magic_name__ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
__magic_name__ = [Image.fromarray(lowerCamelCase_ ) for image in images]
return pil_images
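# Usage sketch (editor's addition; assumes float images in [0, 1] with NHWC layout):
#   import numpy as np
#   pils = numpy_to_pil(np.random.rand(2, 64, 64, 3))
#   assert len(pils) == 2 and pils[0].size == (64, 64)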
| 664 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowerCAmelCase:
"""simple docstring"""
a : Optional[int] =XGLMConfig
a : str ={}
a : Any ='''gelu'''
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_4 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=0.0_2 , ):
UpperCamelCase_: Dict = parent
UpperCamelCase_: str = batch_size
UpperCamelCase_: List[str] = seq_length
UpperCamelCase_: Tuple = is_training
UpperCamelCase_: Optional[Any] = use_input_mask
UpperCamelCase_: Any = use_labels
UpperCamelCase_: List[Any] = vocab_size
UpperCamelCase_: Optional[Any] = d_model
UpperCamelCase_: List[str] = num_hidden_layers
UpperCamelCase_: List[str] = num_attention_heads
UpperCamelCase_: List[str] = ffn_dim
UpperCamelCase_: str = activation_function
UpperCamelCase_: Union[str, Any] = activation_dropout
UpperCamelCase_: str = attention_dropout
UpperCamelCase_: str = max_position_embeddings
UpperCamelCase_: List[str] = initializer_range
UpperCamelCase_: str = None
UpperCamelCase_: List[Any] = 0
UpperCamelCase_: str = 2
UpperCamelCase_: Tuple = 1
def _a ( self ):
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def _a ( self ):
UpperCamelCase_: List[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCamelCase_: Dict = None
if self.use_input_mask:
UpperCamelCase_: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_: Optional[int] = self.get_config()
UpperCamelCase_: Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _a ( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_lowerCamelCase , )
def _a ( self ):
UpperCamelCase_: int = self.prepare_config_and_inputs()
        config , input_ids , input_mask , head_mask = config_and_inputs  # unpack in the order built above
UpperCamelCase_: Any = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : str =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
a : int =(TFXGLMForCausalLM,) if is_tf_available() else ()
a : str =(
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
a : Optional[int] =False
a : Any =False
a : Optional[int] =False
def _a ( self ):
UpperCamelCase_: Dict = TFXGLMModelTester(self )
UpperCamelCase_: Any = ConfigTester(self , config_class=_lowerCamelCase , n_embd=3_7 )
def _a ( self ):
self.config_tester.run_common_tests()
@slow
def _a ( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_: Union[str, Any] = TFXGLMModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def _a ( self ):
super().test_resize_token_embeddings()
@require_tf
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self , _lowerCamelCase=True ):
UpperCamelCase_: Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        UpperCamelCase_: int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCamelCase_: List[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
UpperCamelCase_: int = model.generate(_lowerCamelCase , do_sample=_lowerCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCamelCase )
@slow
def _a ( self ):
UpperCamelCase_: str = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase_: int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
UpperCamelCase_: str = tokenizer('Today is a nice day and' , return_tensors='tf' )
UpperCamelCase_: int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
UpperCamelCase_: str = model.generate(_lowerCamelCase , do_sample=_lowerCamelCase , seed=[7, 0] )
UpperCamelCase_: Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowerCamelCase )
UpperCamelCase_: Dict = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
@slow
def _a ( self ):
UpperCamelCase_: List[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
UpperCamelCase_: List[Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
UpperCamelCase_: Optional[int] = 'left'
# use different length sentences to test batching
UpperCamelCase_: str = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
UpperCamelCase_: List[str] = tokenizer(_lowerCamelCase , return_tensors='tf' , padding=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = inputs['input_ids']
UpperCamelCase_: Optional[Any] = model.generate(input_ids=_lowerCamelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=1_2 )
UpperCamelCase_: Dict = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
UpperCamelCase_: int = model.generate(input_ids=_lowerCamelCase , max_new_tokens=1_2 )
UpperCamelCase_: Tuple = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
UpperCamelCase_: str = model.generate(input_ids=_lowerCamelCase , max_new_tokens=1_2 )
UpperCamelCase_: Tuple = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCamelCase )
UpperCamelCase_: int = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCamelCase )
UpperCamelCase_: Dict = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , [non_padded_sentence, padded_sentence] )
| 57 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    ''' , )
class FillMaskPipeline(Pipeline ):
    """simple docstring"""
    def get_masked_index( self , input_ids: GenericTensor ) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index

    def _ensure_exactly_one_mask_token( self , input_ids: GenericTensor ) -> None:
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )

    def ensure_exactly_one_mask_token( self , model_inputs ) -> None:
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )

    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs

    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess( self , model_outputs , top_k=5 , target_ids=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=False )
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result

    def get_target_ids( self , targets , top_k=None ) -> np.ndarray:
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )["input_ids"]
                if len(input_ids ) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass, it becomes pretty slow, so let's
                # make sure the warning enables them to fix the input to get faster
                # performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        target_ids = np.array(target_ids )
        return target_ids

    def _sanitize_parameters( self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params

    def __call__( self , inputs , *args , **kwargs ):
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
| 664 | 0 |
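# For context, a sketch of how the FillMaskPipeline above is typically reached
# through the public API (model choice and the printed fields are illustrative;
# "token_str"/"score" match the proposition dict built in postprocess()):
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
for prediction in fill_mask("The capital of France is [MASK].", top_k=3):
    print(prediction["token_str"], prediction["score"])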
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_0_0_0_0_0_0 , n_limit: int = 1_0 ) -> int:
    '''simple docstring'''
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 58 |
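# A hollow lamina with outer side `outer` and hole side `hole` uses
# outer**2 - hole**2 tiles, which is what solution() above counts. A brute-force
# cross-check sketch (the helper name and the small limits are illustrative):
from collections import defaultdict


def solution_brute_force(t_limit: int = 1_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit):
        # the hole keeps the parity of `outer` and leaves a border of thickness >= 1
        for hole in range(outer - 2, 0, -2):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # shrinking the hole only ever uses more tiles
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


assert solution_brute_force(1_000, 10) == solution(1_000, 10)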
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int] , k: int ) -> int:
    '''simple docstring'''
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 664 | 0 |
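# The function above keeps a sliding window: each step drops array[i] and adds
# array[i + k], so the scan is O(n). A deterministic check (the values are an
# illustrative example, separate from the random test above):
# windows of size 4: [1,4,2,10]=17, [4,2,10,23]=39, [2,10,23,3]=38, ... -> 39
assert max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39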
from __future__ import annotations
def solve_maze(maze: list[list[int]] ) -> bool:
    """simple docstring"""
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved


def run_maze(maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    """simple docstring"""
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
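# A small worked input for the backtracking solver above (the grid is an
# illustrative example: 0 marks a free cell, 1 a blocked one; the path runs
# from the top-left to the bottom-right corner):
maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
solve_maze(maze)  # prints the 0/1 solution grid and returns True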
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__( self , vocab_size=3_20_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 664 | 0 |
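# A sketch of a configuration that passes the validation above (the values are
# illustrative, not recommended settings):
config = LlamaConfig(
    rope_scaling={"type": "linear", "factor": 2.0},  # extend the context by interpolating positions
)
# A one-field dict, an unknown type, or factor <= 1.0 would raise ValueError above.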
from __future__ import annotations
RADIX = 1_0


def radix_sort(list_of_ints: list[int] ) -> list[int]:
    """simple docstring"""
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
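# A quick check of the LSD radix sort above (the input is an illustrative
# example; note the function sorts in place and assumes non-negative integers):
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]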
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 664 | 0 |
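# Both helpers above rearrange the ideal gas law PV = nRT. A worked example
# (the numbers are illustrative): 1 mol at 300 K in 0.025 m^3 gives
# P = nRT / V = 1 * 8.314462 * 300 / 0.025 ~= 99_774 Pa, roughly one atmosphere.
pressure = pressure_of_gas_system(moles=1.0, kelvin=300.0, volume=0.025)
print(f"{pressure:.0f} Pa")  # ~99774 Pa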
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """simple docstring"""
    raise RuntimeError("CUDA out of memory." )


class ModelForTest(nn.Module ):
    """simple docstring"""
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )


class MemoryTest(unittest.TestCase ):
    """simple docstring"""
    def test_base_case( self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )

    def test_args( self ):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga

        bs , arga = mock_training_loop_function("hello" )
        self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, "hello"] )

    def test_start_zero( self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )

    def test_approach_zero( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )

    def test_verbose_guard( self ):
        @find_executable_batch_size(starting_batch_size=128 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(128 , "hello" , "world" )
        self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
        self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )

    def test_any_other_error( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError("Oops, we had an error!" )

        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!" , cm.exception.args[0] )

    @require_cuda
    def test_release_memory( self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 61 |
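# Outside the tests, the decorator is used the same way in real training code:
# it retries the wrapped function, halving batch_size after each out-of-memory
# failure. A sketch of the documented pattern (the body is a placeholder):
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # batch_size is injected by the decorator; do not pass it yourself.
    ...


train()  # tries 128, then 64, 32, ... until the body finishes without OOM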
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask ):
    """simple docstring"""
    def __init__( self , label_idx=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file( self , data_dir: str , mode: Union[Split, str] ) -> List[InputExample]:
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="utf-8" ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" " )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
        return examples
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ) -> None:
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(output_line )
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )

    def get_labels( self , path: str ) -> List[str]:
        if path:
            with open(path , "r" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER ):
    """simple docstring"""
    def __init__( self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def get_labels( self , path: str ) -> List[str]:
        if path:
            with open(path , "r" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask ):
    """simple docstring"""
    def read_examples_from_file( self , data_dir: str , mode: Union[Split, str] ) -> List[InputExample]:
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="utf-8" ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ) -> None:
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out )
            example_id += 1

    def get_labels( self , path: str ) -> List[str]:
        if path:
            with open(path , "r" ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 664 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : str = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(lowercase , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase , split=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = parquet_path
elif issubclass(lowercase , lowercase ):
SCREAMING_SNAKE_CASE : Any = [parquet_path]
SCREAMING_SNAKE_CASE : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_dataset(lowercase , lowercase )
def lowerCamelCase__ ( lowercase , lowercase , lowercase=("train",) ):
"""simple docstring"""
assert isinstance(lowercase , lowercase )
for split in splits:
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : Optional[int] = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE : Dict = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader({"train": parquet_path} , features=lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE : Dict = {split: parquet_path}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = "train"
SCREAMING_SNAKE_CASE : Dict = {"train": parquet_path, "test": parquet_path}
SCREAMING_SNAKE_CASE : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE : int = ParquetDatasetReader(lowercase , cache_dir=lowercase ).read()
_check_parquet_datasetdict(lowercase , lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : Optional[int] = pq.ParquetFile(tmp_path / "foo.parquet" )
SCREAMING_SNAKE_CASE : Union[str, Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = str(shared_datadir / "test_image_rgb.jpg" )
SCREAMING_SNAKE_CASE : Union[str, Any] = {"image": [image_path]}
SCREAMING_SNAKE_CASE : Any = Features({"image": Image()} )
SCREAMING_SNAKE_CASE : List[Any] = Dataset.from_dict(lowercase , features=lowercase )
SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
SCREAMING_SNAKE_CASE : Dict = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert get_writer_batch_size(lowercase ) == expected
| 62 |
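# The readers/writers exercised above are internal plumbing; through the public
# `datasets` API the equivalent Parquet round trip looks roughly like this
# (file name and columns are illustrative):
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_parquet("data.parquet")  # backed by ParquetDatasetWriter
reloaded = Dataset.from_parquet("data.parquet")  # backed by ParquetDatasetReader
assert reloaded.column_names == ds.column_names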
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """simple docstring"""
    def __init__( self , row: int , column: int , default_value: float = 0 ) -> None:
        self.row , self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]

    def __str__( self ) -> str:
        s = f'Matrix consists of {self.row} rows and {self.column} columns\n'
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector: list[float] ) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line

        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self : Optional[int] ) -> str:
return str(self )
    def validate_indicies( self , loc: tuple[int, int] ) -> bool:
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self , loc: tuple[int, int] ) -> Any:
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]

    def __setitem__( self , loc: tuple[int, int] , value: float ) -> None:
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another: Matrix ) -> Matrix:
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__( self ) -> Matrix:
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another: Matrix ) -> Matrix:
        return self + (-another)

    def __mul__( self , another: int | float | Matrix ) -> Matrix:
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another )})'
            raise TypeError(msg )
    def transpose( self ) -> Matrix:
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison( self , u: Matrix , v: Matrix ) -> Any:
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        '''simple docstring'''
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F'a^(-1) is {ainv}' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'u is {u}' )
        print(F'v is {v}' )
        print(F'uv^T is {u * v.transpose()}' )
        # Sherman Morrison
        print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}' )

    def test2() -> None:
        '''simple docstring'''
        import doctest

        doctest.testmod()

    test2()
| 664 | 0 |
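# The identity used above is (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u).
# An independent NumPy sanity check reusing the test vectors (a verification
# sketch, not part of the class):
import numpy as np

A = np.eye(3)  # the test uses the 3x3 identity
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

A_inv = np.linalg.inv(A)
denominator = 1.0 + (v.T @ A_inv @ u)[0, 0]  # -14 here, so the update is valid
sherman_morrison = A_inv - (A_inv @ u @ v.T @ A_inv) / denominator
assert np.allclose(sherman_morrison, np.linalg.inv(A + u @ v.T))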
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
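# The _LazyModule indirection above defers the heavy torch imports until an
# attribute is first touched. A generic stdlib-only sketch of the same pattern
# (class and symbol names are illustrative, not the transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported symbols from their submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)  # assumes package context
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value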
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__magic_name__ : List[Any] =logging.getLogger(__name__)
__magic_name__ : int ='Hello world! cécé herlolip'
__magic_name__ : List[Any] =namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints , dump_path ):
    '''simple docstring'''
    config = BertAbsConfig(
        temp_dir="." , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(checkpoints , torch.device("cpu" ) , config )
    original.eval()

    new_model = BertAbsSummarizer(config , torch.device("cpu" ) )
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model" )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-." )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-." )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )

    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference ) )

    are_identical = torch.allclose(output_converted_generator , output_original_generator , atol=1e-3 )
    if are_identical:
        logging.info("all weights are equal up to 1e-3" )
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one." )

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary" )
    torch.save(
        new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
__magic_name__ : Dict =argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
__magic_name__ : Any =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 664 | 0 |
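# The core safety net in the conversion script above is the output comparison;
# the same check is useful for any weight port. A small reusable sketch (the
# helper name and tolerance are illustrative):
import torch


def assert_outputs_close(out_a, out_b, atol=1e-3):
    """Fail loudly if two ports of the same model disagree on the same input."""
    max_diff = torch.max(torch.abs(out_a - out_b)).item()
    print(f"Maximum absolute difference: {max_diff:.2e}")
    if not torch.allclose(out_a, out_b, atol=atol):
        raise ValueError("outputs differ beyond tolerance; the converted model likely diverges.")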
class EditDistance:
    def __init__( self ):
        self.word1 = ''''''
        self.word2 = ''''''
        self.dp = []

    def __min_dist_top_down_dp( self , m , n ) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , word1: str , word2: str ) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2 ) )] for _ in range(len(word1 ) )]
        return self.__min_dist_top_down_dp(len(word1 ) - 1 , len(word2 ) - 1 )

    def min_dist_bottom_up( self , word1: str , word2: str ) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1 )
        n = len(word2 )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 |
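# Both methods above compute the classic Levenshtein distance; a quick
# non-interactive check (the word pair is a textbook example):
solver = EditDistance()
# "kitten" -> "sitting": substitute k->s, substitute e->i, append g
assert solver.min_dist_top_down("kitten", "sitting") == 3
assert solver.min_dist_bottom_up("kitten", "sitting") == 3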
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase ):
    """simple docstring"""
    def test_input_types( self ):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def test_check_illegal_input( self ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here

    def test_example_progression( self ):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )

        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def test_example_progression_unequal_three_mid_and_reset( self ):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )

        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 664 | 0 |
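# Outside the test harness the constraint is driven token by token during
# generation; a minimal sketch of that loop (the token ids are illustrative):
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.completed)  # True: the branch [1, 2, 4] has been fulfilled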
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class __lowercase ( __lowerCamelCase ):
snake_case_ = ["""input_features""", """attention_mask"""]
def __init__( self : Any ,A : str=80 ,A : Optional[int]=16_000 ,A : int=0.0 ,A : str=10 ,A : Any=25 ,A : str="hamming_window" ,A : int=3_2_7_6_8.0 ,A : List[str]=0.9_7 ,A : Optional[int]=1.0 ,A : Optional[Any]=True ,A : Tuple=True ,A : Any=False ,**A : int ,):
'''simple docstring'''
super().__init__(feature_size=A ,sampling_rate=A ,padding_value=A ,**A )
UpperCAmelCase__ : str = feature_size
UpperCAmelCase__ : int = sampling_rate
UpperCAmelCase__ : int = padding_value
UpperCAmelCase__ : Dict = hop_length
UpperCAmelCase__ : int = win_length
UpperCAmelCase__ : Dict = frame_signal_scale
UpperCAmelCase__ : Dict = preemphasis_coeff
UpperCAmelCase__ : str = mel_floor
UpperCAmelCase__ : Any = normalize_means
UpperCAmelCase__ : str = normalize_vars
UpperCAmelCase__ : int = win_function
UpperCAmelCase__ : List[Any] = return_attention_mask
UpperCAmelCase__ : str = win_length * sampling_rate // 1_000
UpperCAmelCase__ : List[Any] = hop_length * sampling_rate // 1_000
UpperCAmelCase__ : int = optimal_fft_length(self.sample_size )
UpperCAmelCase__ : List[Any] = (self.n_fft // 2) + 1
def __lowercase ( self : Union[str, Any] ,A : np.array ):
'''simple docstring'''
if self.win_function == "hamming_window":
UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=A )
else:
UpperCAmelCase__ : Any = window_function(window_length=self.sample_size ,name=self.win_function )
UpperCAmelCase__ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
UpperCAmelCase__ : Optional[Any] = spectrogram(
one_waveform * self.frame_signal_scale ,window=A ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=A ,preemphasis=self.preemphasis_coeff ,mel_filters=A ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
return msfc_features.T
def __lowercase ( self : str ,A : Any ,A : Optional[int] ,A : str ):
'''simple docstring'''
# make sure we normalize float32 arrays
if self.normalize_means:
UpperCAmelCase__ : Optional[Any] = x[:input_length].mean(axis=0 )
UpperCAmelCase__ : Any = np.subtract(A ,A )
if self.normalize_vars:
UpperCAmelCase__ : str = x[:input_length].std(axis=0 )
UpperCAmelCase__ : Optional[int] = np.divide(A ,A )
if input_length < x.shape[0]:
UpperCAmelCase__ : int = padding_value
# make sure array is in float32
UpperCAmelCase__ : str = x.astype(np.floataa )
return x
def __lowercase ( self : Union[str, Any] ,A : List[np.ndarray] ,A : Optional[np.ndarray] = None ):
'''simple docstring'''
UpperCAmelCase__ : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(A ,A ,self.padding_value ) for x, n in zip(A ,A )]
def __call__( self : Union[str, Any] ,A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,A : Union[bool, str, PaddingStrategy] = False ,A : Optional[int] = None ,A : bool = False ,A : Optional[int] = None ,A : Optional[bool] = None ,A : Optional[Union[str, TensorType]] = None ,A : Optional[int] = None ,**A : Tuple ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
UpperCAmelCase__ : Optional[Any] = isinstance(A ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
UpperCAmelCase__ : Any = is_batched_numpy or (
isinstance(A ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase__ : List[str] = [np.asarray(A ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A ,np.ndarray ):
UpperCAmelCase__ : Union[str, Any] = np.asarray(A ,dtype=np.floataa )
elif isinstance(A ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase__ : Optional[Any] = [raw_speech]
# extract fbank features
UpperCAmelCase__ : Tuple = [self._extract_mfsc_features(A ) for one_waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase__ : str = BatchFeature({"""input_features""": features} )
UpperCAmelCase__ : Optional[Any] = self.pad(
A ,padding=A ,max_length=A ,truncation=A ,pad_to_multiple_of=A ,return_attention_mask=A ,**A ,)
# make sure list is in array format
UpperCAmelCase__ : Tuple = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,A ):
UpperCAmelCase__ : Union[str, Any] = [np.asarray(A ,dtype=np.floataa ) for feature in input_features]
UpperCAmelCase__ : Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
UpperCAmelCase__ : str = [np.asarray(A ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
UpperCAmelCase__ : Union[str, Any] = (
np.array(A ,dtype=np.intaa )
if self._get_padding_strategies(A ,max_length=A ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
UpperCAmelCase__ : Any = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=A )
if return_tensors is not None:
UpperCAmelCase__ : Union[str, Any] = padded_inputs.convert_to_tensors(A )
return padded_inputs
| 65 |
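# The extractor above converts its millisecond windowing parameters into sample
# counts in __init__; with the defaults shown, the arithmetic works out as
# follows (a standalone illustration, not library code):
sampling_rate = 16_000  # Hz
win_length_ms, hop_length_ms = 25, 10
sample_size = win_length_ms * sampling_rate // 1_000  # 400 samples per analysis window
sample_stride = hop_length_ms * sampling_rate // 1_000  # 160 samples between window starts
print(sample_size, sample_stride)  # 400 160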
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Union[str, Any]:
__magic_name__ = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def __A ( cls : Any ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def __A ( self : Optional[Any] ) -> Dict:
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase , repo_id="test-config" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : str ) -> Optional[int]:
    config = BertConfig(
        vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
    config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
    new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v , getattr(new_config , k ) )
    # Reset repo
    delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
    # Push to hub via save_pretrained
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(
            tmp_dir , repo_id="valid_org/test-config-org" , push_to_hub=True , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
def __A ( self : Optional[int] ) -> Union[str, Any]:
    CustomConfig.register_for_auto_class()
    config = CustomConfig(attribute=42 )
    config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
    # This has added the proper auto_map field to the config
    self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
    new_config = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=True )
    # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
    self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
    self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ) -> Optional[Any]:
    c = GPTaConfig()
    # attempt to modify each of int/float/bool/str config records and verify they were updated
    n_embd = c.n_embd + 1  # int
    resid_pdrop = c.resid_pdrop + 1.0  # float
    scale_attn_weights = not c.scale_attn_weights  # bool
    summary_type = c.summary_type + "foo"  # str
    c.update_from_string(
        f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
    self.assertEqual(n_embd , c.n_embd , "mismatch for key: n_embd" )
    self.assertEqual(resid_pdrop , c.resid_pdrop , "mismatch for key: resid_pdrop" )
    self.assertEqual(scale_attn_weights , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
    self.assertEqual(summary_type , c.summary_type , "mismatch for key: summary_type" )
def __A ( self : List[Any] ) -> Union[str, Any]:
    base_config = PretrainedConfig()
    missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
    # If this part of the test fails, you have arguments to add in config_common_kwargs above.
    self.assertListEqual(
        missing_keys , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
    keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
    if len(keys_with_defaults ) > 0:
        raise ValueError(
            "The following keys are set with the default values in"
            " `test_configuration_common.config_common_kwargs`; pick another value for them:"
            f' {", ".join(keys_with_defaults )}.' )
def __A ( self : List[Any] ) -> List[Any]:
    with self.assertRaises(OSError ):
        # config is in subfolder, the following should not work without specifying the subfolder
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
    config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
    self.assertIsNotNone(config )
def __A ( self : Tuple ) -> int:
    # A mock response for an HTTP head request to emulate server down
    response_mock = mock.Mock()
    response_mock.status_code = 500
    response_mock.headers = {}
    response_mock.raise_for_status.side_effect = HTTPError
    response_mock.json.return_value = {}
    # Download this model to make sure it's in the cache.
    _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
    # Under the mock environment we get a 500 error when trying to reach the model.
    with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
    # This checks that the fake head request was indeed called
    mock_head.assert_called()
def __A ( self : Union[str, Any] ) -> Dict:
    # This test is for deprecated behavior and can be removed in v5
    _ = BertConfig.from_pretrained(
        "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def __A ( self : Dict ) -> Optional[int]:
    configuration = AutoConfig.from_pretrained("bert-base-cased" )
    configuration.configuration_files = ["config.4.0.0.json"]
    with tempfile.TemporaryDirectory() as tmp_dir:
        configuration.save_pretrained(tmp_dir )
        configuration.hidden_size = 2
        json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , "config.4.0.0.json" ) , "w" ) )
        # This should pick the new configuration file as the version of Transformers is > 4.0.0
        new_configuration = AutoConfig.from_pretrained(tmp_dir )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # Will need to be adjusted if we reach v42 and this test is still here.
        # Should pick the old configuration file as the version of Transformers is < 4.42.0
        configuration.configuration_files = ["config.42.0.0.json"]
        configuration.hidden_size = 768
        configuration.save_pretrained(tmp_dir )
        shutil.move(os.path.join(tmp_dir , "config.4.0.0.json" ) , os.path.join(tmp_dir , "config.42.0.0.json" ) )
        new_configuration = AutoConfig.from_pretrained(tmp_dir )
        self.assertEqual(new_configuration.hidden_size , 768 )
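    # Versioned-config convention exercised above: from_pretrained scans for files named
    # config.X.Y.Z.json and picks the newest one whose version stamp does not exceed the
    # installed transformers version, falling back to plain config.json otherwise.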
def __A ( self : Optional[int] ) -> str:
    # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
    repo = "hf-internal-testing/test-two-configs"
    import transformers as new_transformers
    new_transformers.configuration_utils.__version__ = "v4.0.0"
    new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
        repo , return_unused_kwargs=True )
    self.assertEqual(new_configuration.hidden_size , 2 )
    # This checks `_configuration_file` is not kept in the kwargs by mistake.
    self.assertDictEqual(kwargs , {} )
    # Testing an older version by monkey-patching the version in the module it's used.
    import transformers as old_transformers
    old_transformers.configuration_utils.__version__ = "v3.0.0"
    old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
    self.assertEqual(old_configuration.hidden_size , 768 )
| 664 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file: str , eval_file: str , test_file: str , tokenizer: PreTrainedTokenizer , label_column_id: int , max_seq_length: Optional[int] = None , ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='max_length' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='max_length' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"} )
    train_file: Optional[str] = field(default=None , metadata={"help": "The path of the training file"} )
    dev_file: Optional[str] = field(default=None , metadata={"help": "The path of the development file"} )
    test_file: Optional[str] = field(default=None , metadata={"help": "The path of the test file"} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False , metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.info(
        F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
        F"""16-bits training: {training_args.fp16}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(F""" {key} = {value}""" )
                writer.write(F"""{key} = {value}\n""" )
        results.update(result )
    return results
if __name__ == "__main__":
main()
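# A hypothetical invocation of this script (the file name and data paths are placeholders;
# the flags follow the dataclass fields defined above):
# python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --max_seq_length 128 \
#     --output_dir ./results --do_train --do_eval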
| 66 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> Dict:
    size = size if size is not None else {"height": 18, "width": 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
def prepare_image_processor_dict( self ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = DPTImageProcessor if is_vision_available() else None
def setUp( self ) -> Any:
    self.image_processor_tester = DPTImageProcessingTester(self )
@property
def image_processor_dict( self ) -> str:
    return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> List[str]:
    image_processing = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(image_processing , "image_mean" ) )
    self.assertTrue(hasattr(image_processing , "image_std" ) )
    self.assertTrue(hasattr(image_processing , "do_normalize" ) )
    self.assertTrue(hasattr(image_processing , "do_resize" ) )
    self.assertTrue(hasattr(image_processing , "size" ) )
def __A ( self : List[str] ) -> List[Any]:
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
    self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : Union[str, Any] ) -> List[str]:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
    for image in image_inputs:
        self.assertIsInstance(image , Image.Image )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size["height"],
            self.image_processor_tester.size["width"],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size["height"],
            self.image_processor_tester.size["width"],
        ) , )
def __A ( self : Dict ) -> Optional[Any]:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
    for image in image_inputs:
        self.assertIsInstance(image , np.ndarray )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size["height"],
            self.image_processor_tester.size["width"],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size["height"],
            self.image_processor_tester.size["width"],
        ) , )
def __A ( self : Optional[int] ) -> Dict:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
    for image in image_inputs:
        self.assertIsInstance(image , torch.Tensor )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size["height"],
            self.image_processor_tester.size["width"],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size["height"],
            self.image_processor_tester.size["width"],
        ) , )
| 664 | 0 |
import string
def decrypt( message: str ) -> None:
    for key in range(len(string.ascii_uppercase ) ):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"""Decryption using Key #{key}: {translated}""" )
def main() -> None:
    message = input('Encrypted message: ' )
    message = message.upper()
    decrypt(message )
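# For reference, a minimal encrypt counterpart (an addition, not part of the original
# snippet): it applies the shift in the opposite direction, so decrypt() above can
# recover the plaintext by brute-forcing every key.
def encrypt(message: str, key: int) -> str:
    translated = ''
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % len(string.ascii_uppercase)
            translated = translated + string.ascii_uppercase[num]
        else:
            translated = translated + symbol
    return translated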
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 67 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
"""simple docstring"""
def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ) -> None:
    self.input_array = input_array
    # Random initial weights are assigned where first argument is the
    # number of nodes in previous layer and second argument is the
    # number of nodes in the next layer.
    # Random initial weights are assigned.
    # self.input_array.shape[1] is used to represent number of nodes in input layer.
    # First hidden layer consists of 4 nodes.
    self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
        self.input_array.shape[1] , 4 )
    # Random initial values for the first hidden layer.
    # First hidden layer has 4 nodes.
    # Second hidden layer has 3 nodes.
    self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
        4 , 3 )
    # Random initial values for the second hidden layer.
    # Second hidden layer has 3 nodes.
    # Output layer has 1 node.
    self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
    # Real output values provided.
    self.output_array = output_array
    # Predicted output values by the neural network.
    # Predicted_output array initially consists of zeroes.
    self.predicted_output = numpy.zeros(output_array.shape )
def feedforward( self ) -> numpy.ndarray:
    self.layer_between_input_and_first_hidden_layer = sigmoid(
        numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
    # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
    # connecting the first hidden set of nodes with the second hidden set of nodes.
    self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
        numpy.dot(
            self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
    # layer_between_second_hidden_layer_and_output is the layer connecting
    # second hidden layer with the output node.
    self.layer_between_second_hidden_layer_and_output = sigmoid(
        numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
    return self.layer_between_second_hidden_layer_and_output
def back_propagation( self ) -> None:
    updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
        self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
        * (self.output_array - self.predicted_output)
        * sigmoid_derivative(self.predicted_output ) , )
    updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
        self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
        * sigmoid_derivative(
            self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
    updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
        self.input_array.T , numpy.dot(
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
        * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
    self.input_layer_and_first_hidden_layer_weights += (
        updated_input_layer_and_first_hidden_layer_weights
    )
    self.first_hidden_layer_and_second_hidden_layer_weights += (
        updated_first_hidden_layer_and_second_hidden_layer_weights
    )
    self.second_hidden_layer_and_output_layer_weights += (
        updated_second_hidden_layer_and_output_layer_weights
    )
def train( self , output : numpy.ndarray , iterations : int , give_loss : bool ) -> None:
    for iteration in range(1 , iterations + 1 ):
        self.predicted_output = self.feedforward()
        self.back_propagation()
        if give_loss:
            loss = numpy.mean(numpy.square(output - self.feedforward() ) )
            print(f'Iteration {iteration} Loss: {loss}' )
def predict( self , input_arr : numpy.ndarray ) -> int:
    self.array = input_arr
    self.layer_between_input_and_first_hidden_layer = sigmoid(
        numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
    self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
        numpy.dot(
            self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
    self.layer_between_second_hidden_layer_and_output = sigmoid(
        numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
    return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid( value : numpy.ndarray ):
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative( value : numpy.ndarray ):
    '''simple docstring'''
    return (value) * (1 - (value))
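# Note: sigmoid_derivative expects an already-activated value a = sigmoid(x) and returns
# a * (1 - a), which equals d(sigmoid)/dx evaluated at x. That is why back_propagation()
# applies it to the stored layer outputs rather than to pre-activation values.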
def example():
    '''simple docstring'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 664 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
"""simple docstring"""
def __init__( self , parent , vocab_size=99 , batch_size=13 , d_model=16 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=32 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=30 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ) -> Optional[Any]:
    self.parent = parent
    self.batch_size = batch_size
    self.decoder_seq_length = decoder_seq_length
    # For common tests
    self.seq_length = self.decoder_seq_length
    self.is_training = is_training
    self.use_attention_mask = use_attention_mask
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.d_model = d_model
    self.hidden_size = d_model
    self.decoder_layers = decoder_layers
    self.num_hidden_layers = decoder_layers
    self.decoder_ffn_dim = decoder_ffn_dim
    self.decoder_attention_heads = decoder_attention_heads
    self.num_attention_heads = decoder_attention_heads
    self.eos_token_id = eos_token_id
    self.bos_token_id = bos_token_id
    self.pad_token_id = pad_token_id
    self.decoder_start_token_id = decoder_start_token_id
    self.use_cache = use_cache
    self.max_position_embeddings = max_position_embeddings
    self.scope = None
    self.decoder_key_length = decoder_seq_length
    self.base_model_out_len = 2
    self.decoder_attention_idx = 1
def prepare_config_and_inputs( self ):
    input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
    attention_mask = None
    if self.use_attention_mask:
        attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
    lm_labels = None
    if self.use_labels:
        lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
    config = TrOCRConfig(
        vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
    return (config, input_ids, attention_mask, lm_labels)
def create_and_check_decoder_model_past( self , config , input_ids , attention_mask , lm_labels , ):
    config.use_cache = True
    model = TrOCRDecoder(config=config ).to(torch_device ).eval()
    input_ids = input_ids[:2]
    input_ids[input_ids == 0] += 1
    # first forward pass
    outputs = model(input_ids , use_cache=True )
    outputs_use_cache_conf = model(input_ids )
    outputs_no_past = model(input_ids , use_cache=False )
    self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
    self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
    past_key_values = outputs["past_key_values"]
    # create hypothetical next token and extend to next_input_ids
    next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
    # append to next input_ids and
    next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
    output_from_no_past = model(next_input_ids )["last_hidden_state"]
    output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]
    # select random slice
    random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
    output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
    # test that outputs are equal for slice
    assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 )
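    # The slice comparison above verifies cache correctness: decoding the appended token
    # with past_key_values must reproduce the hidden state obtained by re-running the
    # full sequence without a cache, up to numerical tolerance.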
def prepare_config_and_inputs_for_common( self ):
    config_and_inputs = self.prepare_config_and_inputs()
    config, input_ids, attention_mask, lm_labels = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
    return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
def setUp( self ):
    self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
    self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def _a ( self : Tuple ) -> Any:
pass
def _a ( self : Dict ) -> Union[str, Any]:
pass
def _a ( self : List[Any] ) -> Any:
pass
def _a ( self : Tuple ) -> Optional[Any]:
self.config_tester.run_common_tests()
def test_decoder_model_past( self ):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
def _a ( self : List[Any] ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _a ( self : Optional[int] ) -> int:
pass
| 68 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module ):
    """simple docstring"""
def __init__( self , pretrained_model_name_or_path="sayef/fsner-bert-base-uncased" ):
    super(FSNERModel , self ).__init__()
    self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
    self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
    self.softmax = torch.nn.Softmax(dim=1 )
def BERT( self , **inputs ):
    return self.bert(**inputs ).last_hidden_state
def VectorSum( self , token_embeddings ):
    return token_embeddings.sum(2 , keepdim=True )
def Atten( self , q_rep , S_rep , T=1 ):
    return self.softmax(T * self.cos(q_rep , S_rep ) )
def forward( self , W_query , W_supports ):
    support_sizes = W_supports["sizes"].tolist()
    start_token_id = W_supports["start_token_id"].item()
    end_token_id = W_supports["end_token_id"].item()
    del W_supports["sizes"]
    del W_supports["start_token_id"]
    del W_supports["end_token_id"]
    q = self.BERT(**W_query )
    S = self.BERT(**W_supports )
    p_starts = None
    p_ends = None
    start_token_masks = W_supports["input_ids"] == start_token_id
    end_token_masks = W_supports["input_ids"] == end_token_id
    for i, size in enumerate(support_sizes ):
        if i == 0:
            s = 0
        else:
            s = support_sizes[i - 1]
        s_start = S[s : s + size][start_token_masks[s : s + size]]
        s_end = S[s : s + size][end_token_masks[s : s + size]]
        p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
        p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
        if p_starts is not None:
            p_starts = torch.vstack((p_starts, p_start) )
            p_ends = torch.vstack((p_ends, p_end) )
        else:
            p_starts = p_start
            p_ends = p_end
    return p_starts, p_ends
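# Summary of the scoring scheme in forward() above: W_supports holds tokenized support
# examples whose entity boundaries are marked by dedicated start/end tokens, and W_query
# holds the queries. For each query, the dot products between its token embeddings and
# the support embeddings at the marker positions are summed and softmax-normalized over
# the query tokens, giving p_starts/p_ends: per-token scores for opening and closing a span.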
| 664 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
def __init__( self , row : int , column : int , default_value : float = 0 ):
    """simple docstring"""
    self.row, self.column = row, column
    self.array = [[default_value for c in range(column )] for r in range(row )]
def __str__( self ):
    """simple docstring"""
    s = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
    # Make string identifier
    max_element_length = 0
    for row_vector in self.array:
        for obj in row_vector:
            max_element_length = max(max_element_length , len(str(obj ) ) )
    string_format_identifier = f'''%{max_element_length}s'''
    # Make string and return
    def single_line(row_vector : list[float] ) -> str:
        nonlocal string_format_identifier
        line = "["
        line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
        line += "]"
        return line
    s += "\n".join(single_line(row_vector ) for row_vector in self.array )
    return s
def __repr__( self ):
    """simple docstring"""
    return str(self )
def validate_indicies( self , loc : tuple[int, int] ):
    """simple docstring"""
    if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
        return False
    elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
        return False
    else:
        return True
def __getitem__( self , loc : tuple[int, int] ):
    """simple docstring"""
    assert self.validate_indicies(loc )
    return self.array[loc[0]][loc[1]]
def __setitem__( self , loc : tuple[int, int] , value : float ):
    """simple docstring"""
    assert self.validate_indicies(loc )
    self.array[loc[0]][loc[1]] = value
def __add__( self , another : Matrix ):
    """simple docstring"""
    assert isinstance(another , Matrix )
    assert self.row == another.row and self.column == another.column
    # Add
    result = Matrix(self.row , self.column )
    for r in range(self.row ):
        for c in range(self.column ):
            result[r, c] = self[r, c] + another[r, c]
    return result
def __neg__( self ):
    """simple docstring"""
    result = Matrix(self.row , self.column )
    for r in range(self.row ):
        for c in range(self.column ):
            result[r, c] = -self[r, c]
    return result
def __sub__( self , another : Matrix ):
    """simple docstring"""
    return self + (-another)
def __mul__( self , another : int | float | Matrix ):
    """simple docstring"""
    if isinstance(another , (int, float) ):  # Scalar multiplication
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] * another
        return result
    elif isinstance(another , Matrix ):  # Matrix multiplication
        assert self.column == another.row
        result = Matrix(self.row , another.column )
        for r in range(self.row ):
            for c in range(another.column ):
                for i in range(self.column ):
                    result[r, c] += self[r, i] * another[i, c]
        return result
    else:
        msg = f'''Unsupported type given for another ({type(another )})'''
        raise TypeError(msg )
def transpose( self ):
    """simple docstring"""
    result = Matrix(self.column , self.row )
    for r in range(self.row ):
        for c in range(self.column ):
            result[c, r] = self[r, c]
    return result
def sherman_morrison( self , u : Matrix , v : Matrix ):
    """simple docstring"""
    assert isinstance(u , Matrix ) and isinstance(v , Matrix )
    assert self.row == self.column == u.row == v.row  # u, v should be column vector
    assert u.column == v.column == 1  # u, v should be column vector
    # Calculate
    v_t = v.transpose()
    numerator_factor = (v_t * self * u)[0, 0] + 1
    if numerator_factor == 0:
        return None  # It's not invertable
    return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
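# Formula applied by sherman_morrison() above, with `self` playing the role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# None is returned when the denominator 1 + v^T A^(-1) u is zero, i.e. when A + u v^T
# is singular.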
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'''u is {u}''' )
        print(F'''v is {v}''' )
        print(F'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
    def test2() -> None:
        import doctest
        doctest.testmod()
    test2()
| 69 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
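# The import path recommended by the deprecation message above:
# from diffusers import FlaxStableDiffusionControlNetPipeline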
| 664 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
def a__ ( self : Dict , **A_ : int ) -> Optional[Any]:
"""simple docstring"""
    config = {
'num_train_timesteps': 1100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**A_ )
return config
def a__ ( self : Tuple ) -> Dict:
    """simple docstring"""
    for timesteps in [10, 50, 100, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps )
def a__ ( self : Optional[int] ) -> Dict:
    """simple docstring"""
    for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
        self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def a__ ( self : Tuple ) -> Optional[int]:
    """simple docstring"""
    for schedule in ["linear", "scaled_linear"]:
        self.check_over_configs(beta_schedule=schedule )
def a__ ( self : List[Any] ) -> Union[str, Any]:
    """simple docstring"""
    for prediction_type in ["epsilon", "v_prediction"]:
        self.check_over_configs(prediction_type=prediction_type )
def a__ ( self : List[Any] ) -> List[str]:
    """simple docstring"""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    scheduler.set_timesteps(self.num_inference_steps )
    generator = torch.manual_seed(0 )
    model = self.dummy_model()
    sample = self.dummy_sample_deter * scheduler.init_noise_sigma
    sample = sample.to(torch_device )
    for i, t in enumerate(scheduler.timesteps ):
        sample = scheduler.scale_model_input(sample , t )
        model_output = model(sample , t )
        output = scheduler.step(model_output , t , sample , generator=generator )
        sample = output.prev_sample
    result_sum = torch.sum(torch.abs(sample ) )
    result_mean = torch.mean(torch.abs(sample ) )
    assert abs(result_sum.item() - 10.0807 ) < 1E-2
    assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__ ( self : Dict ) -> Dict:
    """simple docstring"""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
    scheduler = scheduler_class(**scheduler_config )
    scheduler.set_timesteps(self.num_inference_steps )
    generator = torch.manual_seed(0 )
    model = self.dummy_model()
    sample = self.dummy_sample_deter * scheduler.init_noise_sigma
    sample = sample.to(torch_device )
    for i, t in enumerate(scheduler.timesteps ):
        sample = scheduler.scale_model_input(sample , t )
        model_output = model(sample , t )
        output = scheduler.step(model_output , t , sample , generator=generator )
        sample = output.prev_sample
    result_sum = torch.sum(torch.abs(sample ) )
    result_mean = torch.mean(torch.abs(sample ) )
    assert abs(result_sum.item() - 0.0002 ) < 1E-2
    assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def a__ ( self : Any ) -> Any:
    """simple docstring"""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
    generator = torch.manual_seed(0 )
    model = self.dummy_model()
    sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
    sample = sample.to(torch_device )
    for t in scheduler.timesteps:
        sample = scheduler.scale_model_input(sample , t )
        model_output = model(sample , t )
        output = scheduler.step(model_output , t , sample , generator=generator )
        sample = output.prev_sample
    result_sum = torch.sum(torch.abs(sample ) )
    result_mean = torch.mean(torch.abs(sample ) )
    assert abs(result_sum.item() - 10.0807 ) < 1E-2
    assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__ ( self : Optional[Any] ) -> Tuple:
    """simple docstring"""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
    scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
    generator = torch.manual_seed(0 )
    model = self.dummy_model()
    sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
    sample = sample.to(torch_device )
    for t in scheduler.timesteps:
        sample = scheduler.scale_model_input(sample , t )
        model_output = model(sample , t )
        output = scheduler.step(model_output , t , sample , generator=generator )
        sample = output.prev_sample
    result_sum = torch.sum(torch.abs(sample ) )
    result_mean = torch.mean(torch.abs(sample ) )
    assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
    assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
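# Denoising loop shared by the tests above, in outline:
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample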
| 70 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __snake_case ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
__magic_name__ = AutoConfig.from_pretrained(lowerCamelCase_ )
__magic_name__ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_ )
__magic_name__ = checkpoints.load_tax_checkpoint(lowerCamelCase_ )
__magic_name__ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
__magic_name__ = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__magic_name__ = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
__magic_name__ = F'layers_{str(lowerCamelCase_ )}'
# Self-Attention
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
__magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__magic_name__ = flax_model.params["encoder"]["block"][str(lowerCamelCase_ )]["layer"]
__magic_name__ = tax_attention_key
__magic_name__ = tax_attention_out
__magic_name__ = tax_attention_query
__magic_name__ = tax_attention_value
__magic_name__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_global_layer_norm
if split_mlp_wi:
__magic_name__ = tax_mlp_wi_a
__magic_name__ = tax_mlp_wi_a
else:
__magic_name__ = tax_mlp_wi
__magic_name__ = tax_mlp_wo
__magic_name__ = tax_mlp_layer_norm
__magic_name__ = flax_model_encoder_layer_block
# Only for layer 0:
__magic_name__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_encoder_global_rel_embedding
# Assigning
__magic_name__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
__magic_name__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__magic_name__ = F'layers_{str(lowerCamelCase_ )}'
# Self-Attention
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
__magic_name__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
__magic_name__ = tax_enc_dec_attention_module["key"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["out"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["query"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__magic_name__ = flax_model.params["decoder"]["block"][str(lowerCamelCase_ )]["layer"]
__magic_name__ = tax_attention_key
__magic_name__ = tax_attention_out
__magic_name__ = tax_attention_query
__magic_name__ = tax_attention_value
__magic_name__ = tax_pre_attention_layer_norm
__magic_name__ = tax_enc_dec_attention_key
__magic_name__ = tax_enc_dec_attention_out
__magic_name__ = tax_enc_dec_attention_query
__magic_name__ = tax_enc_dec_attention_value
__magic_name__ = tax_cross_layer_norm
if split_mlp_wi:
__magic_name__ = tax_mlp_wi_a
__magic_name__ = tax_mlp_wi_a
else:
__magic_name__ = tax_mlp_wi
__magic_name__ = tax_mlp_wo
__magic_name__ = txa_mlp_layer_norm
__magic_name__ = flax_model_decoder_layer_block
# Decoder Normalization
__magic_name__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
__magic_name__ = txa_decoder_norm
# Only for layer 0:
__magic_name__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_decoder_rel_embedding
# Token Embeddings
__magic_name__ = tax_model["target"]["token_embedder"]["embedding"]
__magic_name__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__magic_name__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(lowerCamelCase_ )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
__magic_name__ : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__magic_name__ : Optional[int] =parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
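# A hypothetical invocation (the paths are placeholders; the config name is just one
# example of a LongT5/T5 checkpoint name):
# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /path/to/t5x_checkpoint \
#     --config_name google/long-t5-local-base \
#     --flax_dump_folder_path ./flax_model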
| 664 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    """simple docstring"""
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="cpu" )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken("<ent>" , lstrip=False , rstrip=False )
    entity_token_two = AddedToken("<ent2>" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_one, entity_token_two]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config ).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'''Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids''' )
    if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
        raise ValueError(
            "Unexpected keys"
            F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="entity_classification" )
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="pt" )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def load_entity_vocab( entity_vocab_path : str ) -> dict:
    """Load a TSV entity vocabulary, mapping each entity title to its line index."""
    entity_vocab = {}
    with open(entity_vocab_path , "r" , encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split("\t" )
            entity_vocab[title] = index
    return entity_vocab
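# Expected entity_vocab.tsv layout: one entity per line, title and a second column
# (ignored by the loader) separated by a tab, e.g. "Japan\t162096" (count illustrative).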
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 71 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
    def setUp( self : Optional[int] ) -> Any:
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()
    def test_exact_match_arg( self : Union[str, Any] ) -> int:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
    def test_exact_match_kwarg( self : List[str] ) -> int:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool(text="hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 664 | 0 |
'''simple docstring'''
def is_pentagonal( n : int ) -> bool:
    '''A number n is pentagonal iff (1 + sqrt(1 + 24 n)) / 6 is a natural number.'''
    root = (1 + 2_4 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
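# Worked example: 22 is the 4th pentagonal number (4 * (3*4 - 1) / 2 = 22), and
# indeed (1 + sqrt(1 + 24*22)) / 6 = (1 + 23) / 6 = 4, so is_pentagonal(22) is True.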
def solution( limit : int = 5_0_0_0 ) -> int:
    '''Find a pair of pentagonal numbers whose sum and difference are both pentagonal, and return their difference.'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
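# This is Project Euler problem 44; searching the first 5000 pentagonal numbers
# is sufficient to find the answer.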
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 2_56
def get_min_hash( tokens : List[str] ):
    '''Compute the MinHash of a list of tokens, or None if the list is too short.'''
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
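# MinHash signatures allow cheap near-duplicate detection: the probability that two
# signatures agree on a given permutation equals the Jaccard similarity of the
# underlying token sets, so NUM_PERM permutations give an unbiased estimate of it.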
def get_tokens( code : str ):
    '''Split a code string into tokens on non-alphanumeric characters.'''
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    """Index of near-duplicate code files, backed by a MinHash LSH index."""
    def __init__( self : int , *,
    duplication_jaccard_threshold : float = 0.85 , ) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self : List[Any] , code_key : Tuple , min_hash : MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self : Union[str, Any] ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self : Tuple , filepath : Tuple ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element : List[Any] ):
    '''Compute the MinHash of one dataset element, keyed by (index, repo_name, path).'''
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator : Type[Dataset] ):
    '''Yield (key, MinHash) pairs for a dataset iterator, using a multiprocessing pool.'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
    '''Build clusters of near-duplicate files from a dataset iterator.'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_a : str , code_b : str ):
    '''Jaccard similarity of the token sets of two code strings.'''
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster : Dict , jaccard_threshold : List[Any] ):
    '''Keep one representative ("extreme") per group of near-duplicates inside a cluster.'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list : Dict , dataset : Any , jaccard_threshold : Union[str, Any] ):
    '''Run _find_cluster_extremes_shared over all clusters in a multiprocessing pool.'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    '''Remove near-duplicate files from a dataset, keeping one extreme per cluster.'''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
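# Minimal usage sketch (assumes a 🤗 dataset whose rows carry "content", "repo_name"
# and "path" columns, as the helpers above expect; the dataset name is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)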
| 664 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
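# Note: parameterized.named_parameters consumes the "testcase_name" key for the
# generated test's name and passes the remaining keys as keyword arguments.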
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp( TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name) -> Optional[Any]:
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path , dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep , '/'),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path , dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset)
    assert next(iter(ds['train']))
| 73 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__magic_name__ : Optional[int] =logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
    '''Convert an official Bort (GluonNLP) checkpoint to a 🤗 Transformers PyTorch model.'''
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=False , output_all_encodings=False , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , None ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir() , "models" )
    vocab = _load_vocab(vocab_name , None , data_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowerCamelCase_ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array : Any ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param : Optional[int] , gluon_param : int ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
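    # e.g. one mapping from the table above:
    #   new_weight = check_and_map_params(
    #       hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight")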
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base" )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors="pt" )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
    if success:
        print("✔️ Both models output the same tensors" )
    else:
        print("❌ The two models do **NOT** output the same tensors" )
        print("Absolute difference is:" , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
@register_to_config
    def __init__( self : List[str] , num_train_timesteps : int = 1000 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ):
        """simple docstring"""
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self : Dict , num_inference_steps : int , device : Union[str, torch.device] = None ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self : int , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ):
        """simple docstring"""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
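    # The 1/2, 23/12 and 55/24 coefficient patterns above are the classical
    # Adams-Bashforth linear multistep weights of orders 2, 3 and 4.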
    def scale_model_input( self : str , sample : torch.FloatTensor , *args : Any , **kwargs : List[Any] ):
        """simple docstring"""
        return sample
    def _get_prev_sample( self : Dict , sample : Any , timestep_index : Optional[Any] , prev_timestep_index : str , ets : Tuple ):
        """simple docstring"""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self : str ):
"""simple docstring"""
return self.config.num_train_timesteps
| 74 |
'''simple docstring'''
def binary_and( a : int , b : int ):
    '''Return the bitwise AND of two non-negative integers as a binary string.'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
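# Example: binary_and(25, 32) == "0b000000": 25 is 0b11001 and 32 is 0b100000,
# and after zero-padding to a common width no bit position is set in both.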
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader( AbstractDatasetInputStream ):
    def __init__( self : Optional[int] , sql : Union[str, "sqlalchemy.sql.Selectable"] , con : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , **kwargs : Dict , ):
        '''simple docstring'''
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )
    def read( self : List[str] ):
        '''simple docstring'''
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    def __init__( self : Any , dataset : Dataset , name : str , con : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_sql_kwargs : Tuple , ):
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write( self : int ):
        '''simple docstring'''
        _ = self.to_sql_kwargs.pop('''sql''' , None )
        _ = self.to_sql_kwargs.pop('''con''' , None )
        index = self.to_sql_kwargs.pop('''index''' , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql( self : Dict , args : str ):
        '''simple docstring'''
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write( self : Optional[Any] , index : Optional[int] , **to_sql_kwargs : Tuple ):
        '''simple docstring'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                    written += num_rows
        return written
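# Minimal usage sketch (these classes back Dataset.from_sql / Dataset.to_sql in
# 🤗 Datasets; the database path and table names below are illustrative):
#
#   import sqlite3
#   con = sqlite3.connect("data.db")
#   ds = SqlDatasetReader("SELECT * FROM my_table", con).read()
#   SqlDatasetWriter(ds, "my_table_copy", con, batch_size=1000).write()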
| 75 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
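# Verbosity can also be driven from the environment, e.g.:
#   TRANSFORMERS_VERBOSITY=error python my_script.py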
def _get_default_logging_level():
    '''simple docstring'''
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
                F'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level
def _get_library_name():
    '''simple docstring'''
    return __name__.split("." )[0]
def _get_library_root_logger():
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger():
    '''simple docstring'''
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger():
    '''simple docstring'''
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict():
    '''simple docstring'''
    return log_levels
def get_logger( name : Optional[str] = None ):
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity():
    '''simple docstring'''
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( verbosity : int ):
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    '''simple docstring'''
    return set_verbosity(INFO )
def set_verbosity_warning():
    '''simple docstring'''
    return set_verbosity(WARNING )
def set_verbosity_debug():
    '''simple docstring'''
    return set_verbosity(DEBUG )
def set_verbosity_error():
    '''simple docstring'''
    return set_verbosity(ERROR )
def disable_default_handler():
    '''simple docstring'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler():
    '''simple docstring'''
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler( handler : logging.Handler ):
    '''simple docstring'''
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler( handler : logging.Handler ):
    '''simple docstring'''
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation():
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation():
    '''simple docstring'''
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format():
    '''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(formatter )
def reset_format():
    '''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice( self : Union[str, Any] , *args : str , **kwargs : Any ):
    '''simple docstring'''
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once( self : Dict , *args : int , **kwargs : int ):
    '''simple docstring'''
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm that does nothing, used when progress bars are disabled."""
def __init__( self : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ) -> Any: # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self : int ) -> Tuple:
return iter(self._iterator )
def __getattr__( self : List[Any] , _lowerCamelCase : int ) -> List[Any]:
def empty_fn(*_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ) -> Any:
return self
def __exit__( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ) -> Dict:
return
class _tqdm_cls:
    """simple docstring"""
def __call__( self : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> List[Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*_lowerCamelCase , **_lowerCamelCase )
else:
return EmptyTqdm(*_lowerCamelCase , **_lowerCamelCase )
    def set_lock( self : Optional[Any] , *args : Optional[Any] , **kwargs : Dict ) -> Union[str, Any]:
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self : str ) -> Any:
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar():
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar():
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 664 | 0 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ):
    return compare_versions(torch_version , operation , version )
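# e.g. gate a code path on the installed torch version:
#   if is_torch_version(">=", "1.12.0"):
#       ...  # use the newer API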
| 76 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
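# The _LazyModule swap above means heavy submodules (and their torch import) are
# only loaded the first time one of their attributes is actually accessed.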
| 664 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho( num : int , seed : int = 2 , step : int = 1 , attempts : int = 3 , ) -> int | None:
"""simple docstring"""
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value : int , step : int , modulus : int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
__UpperCAmelCase : List[str] = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
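# Example: pollard_rho(8051) should return a nontrivial factor of 8051 = 83 * 97
# (with the default seed and step it finds 97).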
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(f'''{args.num} = {divisor} * {quotient}''')
| 77 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits( x : Optional[int] , bits : List[Any] = BITS ) -> Tuple:
    '''Convert an image tensor in [0, 1] to a bit tensor in {-1, 1}.'''
    device = x.device
    x = (x * 2_55).int().clamp(0 , 2_55 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , "d -> d 1 1" )
    x = rearrange(x , "b c h w -> b c 1 h w" )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , "b c d h w -> b (c d) h w" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x : List[Any] , bits : str = BITS ) -> Union[str, Any]:
    '''Convert a bit tensor in {-1, 1} back to an image tensor in [0, 1].'''
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , "d -> d 1 1" )
    x = rearrange(x , "b (c d) h w -> b c d h w" , d=8 )
    dec = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
    return (dec / 2_55).clamp(0.0 , 1.0 )
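# Worked example: a pixel value of 0.6 maps to the integer 153 = 0b10011001, i.e. the
# bit vector [1, 0, 0, 1, 1, 0, 0, 1], which is then rescaled to ±1 for diffusion.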
def ddim_bit_scheduler_step( self : Optional[int] , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , eta : float = 0.0 , use_clipped_model_output : bool = True , generator : Optional[int] = None , return_dict : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    '''simple docstring'''
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else "cpu"
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step( self : Union[str, Any] , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prediction_type : str = "epsilon" , generator : Optional[int] = None , return_dict : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
    '''simple docstring'''
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion( DiffusionPipeline ):
    def __init__(self : str , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, DDPMScheduler] , bit_scale : Optional[float] = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self : Union[str, Any] , height : Optional[int] = 256 , width : Optional[int] = 256 , num_inference_steps : Optional[int] = 50 , generator : Optional[torch.Generator] = None , batch_size : Optional[int] = 1 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs : Tuple , ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 78 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__magic_name__ : str ={
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__magic_name__ : Tuple ={
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil( images ):
    '''Convert a torch image tensor in [-1, 1] to a list of PIL images.'''
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
if images.ndim == 3:
__magic_name__ = images[None, ...]
__magic_name__ = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
__magic_name__ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
__magic_name__ = [Image.fromarray(lowerCamelCase_ ) for image in images]
return pil_images
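# Quick usage check for numpy_to_pil (illustrative values):
#
#   import numpy as np
#   batch = np.random.rand(2, 64, 64, 3).astype("float32")   # values in [0, 1]
#   pils = numpy_to_pil(batch)                                # -> list of two 64x64 RGB PIL images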
| 664 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ) -> Any:
    '''simple docstring'''
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys( config ) -> List[str]:
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ) -> Optional[Any]:
    '''simple docstring'''
    model_name_to_url = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""state_dict"""]
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase__ : int = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 )
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(F"openmmlab/{model_name}" )
        processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
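# Example invocation of this conversion script (the filename is illustrative;
# add --push_to_hub to upload the converted weights):
#
#   python convert_upernet_convnext_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny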
| 79 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class FillMaskPipeline( Pipeline ):
    """simple docstring"""
    def get_masked_index( self , input_ids : GenericTensor ) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids : GenericTensor ) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
    def ensure_exactly_one_mask_token( self , model_inputs : GenericTensor ) -> Any:
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward( self , model_inputs ) -> List[Any]:
        model_outputs = self.model(**model_inputs )
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ) -> Dict:
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids( self , targets , top_k=None ) -> List[str]:
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )["input_ids"]
                if len(input_ids ) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ) -> Tuple:
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params
    def __call__( self , inputs , *args , **kwargs ) -> Optional[int]:
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
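# Hedged usage sketch of the pipeline above via the public `transformers` API
# (the checkpoint name is just an example; any masked-LM checkpoint works):
#
#   from transformers import pipeline
#
#   fill = pipeline("fill-mask", model="distilroberta-base")
#   for pred in fill("The capital of France is <mask>.", top_k=3):
#       print(pred["token_str"], round(pred["score"], 4), pred["sequence"])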
| 664 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests( unittest.TestCase ):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim( self ):
        """simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components )
        ldm3d_pipe = ldm3d_pipe.to(device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = ldm3d_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
        expected_slice_depth = np.array([103.46_727, 85.812_004, 87.849_236] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
    def test_stable_diffusion_ldm3d_prompt_embeds( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components )
        ldm3d_pipe = ldm3d_pipe.to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = ldm3d_pipe(**inputs )
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop("""prompt""" )]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt , padding="""max_length""" , max_length=ldm3d_pipe.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
        text_inputs = text_inputs["""input_ids"""].to(torch_device )
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs )[0]
        inputs["""prompt_embeds"""] = prompt_embeds
        # forward
        output = ldm3d_pipe(**inputs )
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten() ).max() < 1e-4
    def test_stable_diffusion_ldm3d_negative_prompt( self ):
        """simple docstring"""
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components )
        ldm3d_pipe = ldm3d_pipe.to(device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = """french fries"""
        output = ldm3d_pipe(**inputs , negative_prompt=negative_prompt )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
        expected_slice_depth = np.array([107.84_738, 84.62_802, 89.962_135] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """simple docstring"""
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_ldm3d_stable_diffusion( self ):
        """simple docstring"""
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d""" )
        ldm3d_pipe = ldm3d_pipe.to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldm3d_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
        expected_slice_depth = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """simple docstring"""
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 50,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_ldm3d( self ):
        """simple docstring"""
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d""" ).to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldm3d_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495_586
        expected_rgb_std = 0.33_795_515
        expected_depth_mean = 112.48_518
        expected_depth_std = 98.489_746
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3
    def test_ldm3d_4c( self ):
        """simple docstring"""
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(torch_device )
        ldm3d_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldm3d_pipe(**inputs )
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4_194_127
        expected_rgb_std = 0.35_375_586
        expected_depth_mean = 0.5_638_502
        expected_depth_std = 0.34_686_103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 80 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array( array : list[int] , k : int ):
    '''simple docstring'''
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
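# Worked example: for array = [1, 4, 2, 10, 23, 3, 1, 0, 20] and k = 4 the window
# sums are 17, 39, 38, 37, 27, 24, so max_sum_in_array(array, 4) returns 39 (4 + 2 + 10 + 23).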
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 664 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def test_eta( self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
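        # (Added note: the values above follow the DDIM posterior variance
        #      sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev)
        #  evaluated under the linear beta schedule built in get_scheduler_config.)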
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
        assert abs(result_mean.item() - 0.49_82 ) < 1E-3
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.53_02 ) < 1E-2
        assert abs(result_mean.item() - 0.06_84 ) < 1E-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
        assert abs(result_mean.item() - 0.19_51 ) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
        assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 81 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=3_20_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ) -> List[Any]:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
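# Minimal usage sketch of the config above (values are illustrative):
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#   assert config.num_key_value_heads == config.num_attention_heads  # backward-compat default
#   LlamaConfig(rope_scaling={"type": "exotic", "factor": 2.0})      # raises ValueError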
| 664 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
        '''simple docstring'''
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        '''simple docstring'''
        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_expected_patches( self ):
        '''simple docstring'''
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2048
        inputs = image_processor(dummy_image , return_tensors="pt" , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
    def test_call_pil( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError ):
                encoded_images = image_processor(
                    image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_call_pil_four_channels( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
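# (Added note: the "+ 2" in expected_hidden_dim above accounts for the two
#  row/column position features that Pix2Struct prepends to every flattened
#  patch of size patch_height * patch_width * num_channels.)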
| 82 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def pressure_of_gas_system( moles : float , kelvin : float , volume : float ):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles : float , kelvin : float , pressure : float ):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
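# Worked example (PV = nRT): 1 mol of an ideal gas at 273.15 K confined to 0.0224 m^3
# gives pressure_of_gas_system(1, 273.15, 0.0224) ≈ 1.01e5 Pa, i.e. roughly 1 atm.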
if __name__ == "__main__":
from doctest import testmod
testmod()
| 664 | 0 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    def __init__( self , module , attrs=None ):
        """simple docstring"""
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('''__''' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule:
    _active_patches = []
    def __init__( self , obj , target : str , new , attrs=None ):
        """simple docstring"""
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('''.''' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        """simple docstring"""
        *submodules , target_attr = self.target.split('''.''' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('''.'''.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('''.'''.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()['''__builtins__'''][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
    def __exit__( self , *exc_info ):
        """simple docstring"""
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        """simple docstring"""
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
        """simple docstring"""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
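# Hedged usage sketch of the patcher above (the module and attribute being
# patched are just examples; the patch applies to names as seen *inside* the
# object passed as `obj`):
#
#   import os   # this module uses os.path.join somewhere
#   import sys
#
#   this_module = sys.modules[__name__]
#   with patch_submodule(this_module, "os.path.join", lambda *p: "/patched"):
#       assert os.path.join("a", "b") == "/patched"   # swapped out inside this module
#   # on exit, os.path.join is restored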
| 83 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER( TokenClassificationTask ):
    """simple docstring"""
    def __init__( self , label_idx=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file( self , data_dir , mode : Union[Split, str] ) -> List[InputExample]:
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="utf-8" ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" " )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
        return examples
    def write_predictions_to_file( self , writer : TextIO , test_input_reader : TextIO , preds_list : List ) -> Union[str, Any]:
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(output_line )
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
    def get_labels( self , path : str ) -> List[str]:
        if path:
            with open(path , "r" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
    """simple docstring"""
    def __init__( self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )
    def get_labels( self , path : str ) -> List[str]:
        if path:
            with open(path , "r" ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
    """simple docstring"""
    def read_examples_from_file( self , data_dir , mode : Union[Split, str] ) -> List[InputExample]:
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path , encoding="utf-8" ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self , writer : TextIO , test_input_reader : TextIO , preds_list : List ) -> Any:
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out )
            example_id += 1
    def get_labels( self , path : str ) -> List[str]:
        if path:
            with open(path , "r" ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 664 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
UpperCAmelCase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
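# The glider is a period-4 spaceship: every 4 generations it reappears shifted
# one cell diagonally, which the 16 frames rendered in __main__ below make visible.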
# Define blinker example
UpperCAmelCase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = []
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowercase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__SCREAMING_SNAKE_CASE ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__SCREAMING_SNAKE_CASE ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__SCREAMING_SNAKE_CASE ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowercase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__SCREAMING_SNAKE_CASE )
return next_generation
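# A quick sanity check (sketch) for the step function above, invoked as
# `new_generation` at its call site below: one step turns the blinker's
# vertical bar into a horizontal one.
# >>> new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
# [[0, 0, 0], [1, 1, 1], [0, 0, 0]]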
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = []
for _ in range(__SCREAMING_SNAKE_CASE ):
# Create output image
lowercase = Image.new('RGB' , (len(cells[0] ), len(__SCREAMING_SNAKE_CASE )) )
lowercase = img.load()
# Save cells to image
for x in range(len(cells[0] ) ):  # x indexes columns (image width)
for y in range(len(__SCREAMING_SNAKE_CASE ) ):  # y indexes rows (image height)
lowercase = 255 - cells[y][x] * 255
lowercase = (colour, colour, colour)
# Save image
images.append(__SCREAMING_SNAKE_CASE )
lowercase = new_generation(__SCREAMING_SNAKE_CASE )
return images
if __name__ == "__main__":
UpperCAmelCase = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 84 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0 ) -> None:
__magic_name__ , __magic_name__ = row, column
__magic_name__ = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )]
def __str__( self : Optional[Any] ) -> str:
__magic_name__ = f'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
__magic_name__ = 0
for row_vector in self.array:
for obj in row_vector:
__magic_name__ = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) )
__magic_name__ = f'%{max_element_length}s'
# Make string and return
def single_line(_lowerCamelCase : list[float] ) -> str:
nonlocal string_format_identifier
__magic_name__ = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
return str(self )
def __A ( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ) -> bool:
if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Optional[int] , _lowerCamelCase : tuple[int, int] ) -> Any:
assert self.validate_indicies(_lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Tuple , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : float ) -> None:
assert self.validate_indicies(_lowerCamelCase )
__magic_name__ = value
def __add__( self : Union[str, Any] , _lowerCamelCase : Matrix ) -> Matrix:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = -self[r, c]
return result
def __sub__( self : Optional[int] , _lowerCamelCase : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : Optional[int] , _lowerCamelCase : int | float | Matrix ) -> Matrix:
if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c] * another
return result
elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
__magic_name__ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__magic_name__ = f'Unsupported type given for another ({type(_lowerCamelCase )})'
raise TypeError(_lowerCamelCase )
def __A ( self : Optional[int] ) -> Matrix:
__magic_name__ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c]
return result
def __A ( self : int , _lowerCamelCase : Matrix , _lowerCamelCase : Matrix ) -> Any:
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == self.column == u.row == v.row # self must be square and match u, v in size
assert u.column == v.column == 1 # u, v should be column vectors
# Calculate
__magic_name__ = v.transpose()
__magic_name__ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
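# The update above is the Sherman-Morrison identity: if this matrix is A^(-1),
# then (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# so a zero denominator means A + u v^T is not invertible and None is returned.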
# Testing
if __name__ == "__main__":
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = Matrix(3 , 3 , 0 )
for i in range(3 ):
__magic_name__ = 1
print(F'a^(-1) is {ainv}' )
# u, v
__magic_name__ = Matrix(3 , 1 , 0 )
__magic_name__ , __magic_name__ , __magic_name__ = 1, 2, -3
__magic_name__ = Matrix(3 , 1 , 0 )
__magic_name__ , __magic_name__ , __magic_name__ = 4, -2, 5
print(F'u is {u}' )
print(F'v is {v}' )
print(F'uv^T is {u * v.transpose()}' )
# Sherman Morrison
print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase_ , lowerCamelCase_ )}' )
def __snake_case ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 664 | 0 |
import torch
from diffusers import DiffusionPipeline
class snake_case ( UpperCamelCase_ ):
def __init__( self : Any , a_ : Optional[int] , a_ : Dict )-> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=a_ , scheduler=a_ )
def __call__( self : Optional[int] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Tuple = self.unet(a_ , a_ ).sample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler.step(a_ , a_ , a_ ).prev_sample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_output - scheduler_output + torch.ones_like(a_ )
return result
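# Note: `scheduler_output - scheduler_output + torch.ones_like(...)` always
# evaluates to a tensor of ones; the subtraction presumably just keeps the
# unet and scheduler calls exercised in this dummy test pipeline.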
| 85 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__magic_name__ : List[Any] =logging.getLogger(__name__)
__magic_name__ : int ='Hello world! cécé herlolip'
__magic_name__ : List[Any] =namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
'''simple docstring'''
__magic_name__ = BertAbsConfig(
temp_dir="." , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__magic_name__ = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage )
__magic_name__ = AbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) , lowerCamelCase_ )
original.eval()
__magic_name__ = BertAbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__magic_name__ = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__magic_name__ = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
__magic_name__ = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__magic_name__ = encoder_input_ids
__magic_name__ = decoder_input_ids
__magic_name__ = __magic_name__ = None
__magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__magic_name__ = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = original.generator(lowerCamelCase_ )
__magic_name__ = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = new_model.generator(lowerCamelCase_ )
__magic_name__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
__magic_name__ : Dict =argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
__magic_name__ : Any =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 664 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def __snake_case ( __UpperCamelCase : bool = True ,*__UpperCamelCase : Tuple ,**__UpperCamelCase : Dict ):
"""simple docstring"""
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
A_ = False
if main_process_only:
A_ = PartialState().local_process_index == 0
return _tqdm(*__UpperCamelCase ,**__UpperCamelCase ,disable=__UpperCamelCase )
| 86 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> str:
# For consistency across the different places where DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self : List[Any] ) -> str:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(_lowerCamelCase ) # fails here
def __A ( self : List[Any] ) -> int:
__magic_name__ = [[1, 2, 3], [1, 2, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(3 )
__magic_name__ = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
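# In effect the constraint behaves like a trie over the candidate sequences:
# each update() advances along whichever branch is consistent with the tokens
# seen so far, and the constraint completes once any full branch is matched.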
def __A ( self : Any ) -> Union[str, Any]:
__magic_name__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 664 | 0 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int) ->Tuple:
'''simple docstring'''
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''')
A__ = img
A__ = img.shape[1]
A__ = img.shape[0]
A__ = dst_width
A__ = dst_height
A__ = self.src_w / self.dst_w
A__ = self.src_h / self.dst_h
A__ = A__ = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 255
)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
for i in range(self.dst_h):
for j in range(self.dst_w):
A__ = self.img[self.get_y(UpperCAmelCase__)][self.get_x(UpperCAmelCase__)]
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int) ->int:
'''simple docstring'''
return int(self.ratio_x * x)
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : int) ->int:
'''simple docstring'''
return int(self.ratio_y * y)
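# Example of the index mapping (illustrative numbers): scaling a 400x300 image
# up to 800x600 gives ratio_x = ratio_y = 0.5, so the destination pixel at row
# i, column j copies the source pixel at row int(0.5 * i), column int(0.5 * j).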
if __name__ == "__main__":
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = 800, 600
_lowerCamelCase : Any = imread("""image_data/lena.jpg""", 1)
_lowerCamelCase : int = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 87 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__magic_name__ : Dict ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Union[str, Any]:
__magic_name__ = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def __A ( cls : Any ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def __A ( self : Optional[Any] ) -> Dict:
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase , repo_id="test-config" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : str ) -> Optional[int]:
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : Optional[int] ) -> Union[str, Any]:
CustomConfig.register_for_auto_class()
__magic_name__ = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
__magic_name__ = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ) -> Optional[Any]:
__magic_name__ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__magic_name__ = c.n_embd + 1 # int
__magic_name__ = c.resid_pdrop + 1.0 # float
__magic_name__ = not c.scale_attn_weights # bool
__magic_name__ = c.summary_type + "foo" # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(_lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(_lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(_lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
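# update_from_string parses a comma-separated list of key=value pairs and
# casts each value to the type of the existing attribute, so a string such as
# "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
# (values here are illustrative) round-trips ints, floats, bools and strings.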
def __A ( self : List[Any] ) -> Union[str, Any]:
__magic_name__ = PretrainedConfig()
__magic_name__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
__magic_name__ = [key for key, value in config_common_kwargs.items() if value == getattr(_lowerCamelCase , _lowerCamelCase )]
if len(_lowerCamelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f' {", ".join(_lowerCamelCase )}.' )
def __A ( self : List[Any] ) -> List[Any]:
with self.assertRaises(_lowerCamelCase ):
# the config is in a subfolder, so the following should not work without specifying it
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(_lowerCamelCase )
def __A ( self : Tuple ) -> int:
# A mock response for an HTTP head request to emulate server down
__magic_name__ = mock.Mock()
__magic_name__ = 5_00
__magic_name__ = {}
__magic_name__ = HTTPError
__magic_name__ = {}
# Download this model to make sure it's in the cache.
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This checks that we did call the fake head request
mock_head.assert_called()
def __A ( self : Union[str, Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
__magic_name__ = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def __A ( self : Dict ) -> Optional[int]:
__magic_name__ = AutoConfig.from_pretrained("bert-base-cased" )
__magic_name__ = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_lowerCamelCase )
__magic_name__ = 2
json.dump(configuration.to_dict() , open(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__magic_name__ = ["config.42.0.0.json"]
__magic_name__ = 7_68
configuration.save_pretrained(_lowerCamelCase )
shutil.move(os.path.join(_lowerCamelCase , "config.4.0.0.json" ) , os.path.join(_lowerCamelCase , "config.42.0.0.json" ) )
__magic_name__ = AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def __A ( self : Optional[int] ) -> str:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__magic_name__ = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
__magic_name__ = "v4.0.0"
__magic_name__ , __magic_name__ = new_transformers.models.auto.AutoConfig.from_pretrained(
_lowerCamelCase , return_unused_kwargs=_lowerCamelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_lowerCamelCase , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__magic_name__ = "v3.0.0"
__magic_name__ = old_transformers.models.auto.AutoConfig.from_pretrained(_lowerCamelCase )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 664 | 0 |
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : list[int] , __snake_case : int , __snake_case : list[list[int]] , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) )
] # the reference grid
_lowerCamelCase : Union[str, Any] = 1
_lowerCamelCase : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) )
] # the action grid
_lowerCamelCase : int = init[0]
_lowerCamelCase : Union[str, Any] = init[1]
_lowerCamelCase : Dict = 0
_lowerCamelCase : List[Any] = g + heuristic[x][y] # f = g + h: estimated total cost of a path through this cell
_lowerCamelCase : int = [[f, g, x, y]]
_lowerCamelCase : Dict = False # flag that is set when search is complete
_lowerCamelCase : List[str] = False # flag set if we can't expand any further
while not found and not resign:
if len(__snake_case ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
_lowerCamelCase : Any = cell.pop()
_lowerCamelCase : Any = next_cell[2]
_lowerCamelCase : List[str] = next_cell[3]
_lowerCamelCase : str = next_cell[1]
if x == goal[0] and y == goal[1]:
_lowerCamelCase : int = True
else:
for i in range(len(__snake_case ) ): # to try out different valid actions
_lowerCamelCase : int = x + DIRECTIONS[i][0]
_lowerCamelCase : Optional[int] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__snake_case ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
_lowerCamelCase : str = g + cost
_lowerCamelCase : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = i
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : List[Any] = goal[0]
_lowerCamelCase : Optional[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
_lowerCamelCase : Tuple = x - DIRECTIONS[action[x][y]][0]
_lowerCamelCase : List[Any] = y - DIRECTIONS[action[x][y]][1]
_lowerCamelCase : Union[str, Any] = xa
_lowerCamelCase : List[str] = ya
invpath.append([x, y] )
_lowerCamelCase : Optional[Any] = []
for i in range(len(__snake_case ) ):
path.append(invpath[len(__snake_case ) - 1 - i] )
return path, action
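# The heuristic built in __main__ below is the Manhattan distance
# h(i, j) = |i - goal[0]| + |j - goal[1]|; it never overestimates the true
# cost on a 4-connected grid with unit step cost, which keeps A* optimal.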
if __name__ == "__main__":
UpperCAmelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase = 99
UpperCAmelCase , UpperCAmelCase = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 88 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[Any]=18 , _lowerCamelCase : Union[str, Any]=30 , _lowerCamelCase : Tuple=4_00 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=True , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , ) -> Dict:
__magic_name__ = size if size is not None else {"height": 18, "width": 18}
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = image_size
__magic_name__ = min_resolution
__magic_name__ = max_resolution
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = do_normalize
__magic_name__ = image_mean
__magic_name__ = image_std
def __A ( self : int ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCamelCase_ ( A , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = DPTImageProcessor if is_vision_available() else None
def __A ( self : Dict ) -> Any:
__magic_name__ = DPTImageProcessingTester(self )
@property
def __A ( self : str ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> List[str]:
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_lowerCamelCase , "size" ) )
def __A ( self : List[str] ) -> List[Any]:
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Dict ) -> Optional[Any]:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Optional[int] ) -> Dict:
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
__magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 664 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
enable_full_determinism()
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Optional[int] = UNetaDModel
lowercase_ : List[str] = """sample"""
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Tuple = 4
_lowercase : Any = 3
_lowercase : List[str] = (32, 32)
_lowercase : str = floats_tensor((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : List[str] = torch.tensor([10]).to(lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
_lowercase : int = self.dummy_input
return init_dict, inputs_dict
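# The init_dict above configures a deliberately tiny UNet (two blocks of 32
# and 64 channels, 32x32 samples), presumably so the shared ModelTesterMixin
# tests stay fast on CPU.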
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : List[str] = UNetaDModel
lowercase_ : List[Any] = """sample"""
@property
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Tuple = 4
_lowercase : int = 4
_lowercase : Tuple = (32, 32)
_lowercase : Any = floats_tensor((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : Any = torch.tensor([10]).to(lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return (4, 32, 32)
@property
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
return (4, 32, 32)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
_lowercase : int = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : str = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
self.assertEqual(len(loading_info['missing_keys']), 0)
model.to(lowerCamelCase)
_lowercase : List[str] = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Dict = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase)
model.to(lowerCamelCase)
_lowercase : Optional[Any] = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase)
model_accelerate.to(lowerCamelCase)
model_accelerate.eval()
_lowercase : Union[str, Any] = torch.randn(
1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
_lowercase : List[str] = noise.to(lowerCamelCase)
_lowercase : List[Any] = torch.tensor([10] * noise.shape[0]).to(lowerCamelCase)
_lowercase : str = model_accelerate(lowerCamelCase, lowerCamelCase)['sample']
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_lowercase , _lowercase : Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update', output_loading_info=lowerCamelCase, low_cpu_mem_usage=lowerCamelCase)
model_normal_load.to(lowerCamelCase)
model_normal_load.eval()
_lowercase : Dict = model_normal_load(lowerCamelCase, lowerCamelCase)['sample']
assert torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-3)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update')
model.eval()
model.to(lowerCamelCase)
_lowercase : List[Any] = torch.randn(
1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
_lowercase : Dict = noise.to(lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([10] * noise.shape[0]).to(lowerCamelCase)
with torch.no_grad():
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase).sample
_lowercase : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_lowercase : Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0])
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-3))
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : str = UNetaDModel
lowercase_ : Any = """sample"""
@property
def UpperCamelCase ( self, lowerCamelCase=(32, 32)) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = 4
_lowercase : int = 3
_lowercase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : List[Any] = torch.tensor(batch_size * [10]).to(dtype=torch.intaa, device=lowerCamelCase)
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Tuple = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
_lowercase : List[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase , _lowercase : Optional[int] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256', output_loading_info=lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
self.assertEqual(len(loading_info['missing_keys']), 0)
model.to(lowerCamelCase)
_lowercase : List[str] = self.dummy_input
_lowercase : Dict = floats_tensor((4, 3) + (2_56, 2_56)).to(lowerCamelCase)
_lowercase : Optional[Any] = noise
_lowercase : List[Any] = model(**lowerCamelCase)
assert image is not None, "Make sure output is not None"
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256')
model.to(lowerCamelCase)
_lowercase : Tuple = 4
_lowercase : List[str] = 3
_lowercase : str = (2_56, 2_56)
_lowercase : int = torch.ones((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : List[str] = torch.tensor(batch_size * [1E-4]).to(lowerCamelCase)
with torch.no_grad():
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase).sample
_lowercase : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowercase : Tuple = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8])
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-2))
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update')
model.to(lowerCamelCase)
_lowercase : Optional[int] = 4
_lowercase : Optional[Any] = 3
_lowercase : Optional[int] = (32, 32)
_lowercase : str = torch.ones((batch_size, num_channels) + sizes).to(lowerCamelCase)
_lowercase : Tuple = torch.tensor(batch_size * [1E-4]).to(lowerCamelCase)
with torch.no_grad():
_lowercase : Optional[int] = model(lowerCamelCase, lowerCamelCase).sample
_lowercase : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowercase : Tuple = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6])
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase, lowerCamelCase, rtol=1E-2))
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
| 89 |
'''simple docstring'''
import numpy
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCamelCase : numpy.ndarray , _lowerCamelCase : numpy.ndarray ) -> None:
__magic_name__ = input_array
# Random initial weights are assigned, where the first argument is the
# number of nodes in the previous layer and the second argument is the
# number of nodes in the next layer.
# self.input_array.shape[1] gives the number of nodes in the input layer.
# The first hidden layer consists of 4 nodes.
__magic_name__ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__magic_name__ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__magic_name__ = numpy.random.rand(3 , 1 )
# Real output values provided.
__magic_name__ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__magic_name__ = numpy.zeros(output_array.shape )
def __A ( self : int ) -> numpy.ndarray:
__magic_name__ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __A ( self : Dict ) -> None:
__magic_name__ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__magic_name__ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__magic_name__ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __A ( self : Optional[int] , _lowerCamelCase : numpy.ndarray , _lowerCamelCase : int , _lowerCamelCase : bool ) -> None:
for iteration in range(1 , iterations + 1 ):
__magic_name__ = self.feedforward()
self.back_propagation()
if give_loss:
__magic_name__ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'Iteration {iteration} Loss: {loss}' )
def __A ( self : Tuple , _lowerCamelCase : numpy.ndarray ) -> int:
__magic_name__ = input_arr
__magic_name__ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__magic_name__ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def __snake_case ( lowerCamelCase_ : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def __snake_case ( lowerCamelCase_ : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
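# Note: the derivative helper above expects the already-activated value
# a = sigmoid(x) and uses the identity d/dx sigmoid(x) = a * (1 - a), which is
# why back_propagation feeds it layer outputs rather than pre-activations.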
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
__magic_name__ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
__magic_name__ = TwoHiddenLayerNeuralNetwork(
input_array=lowerCamelCase_ , output_array=lowerCamelCase_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=lowerCamelCase_ , iterations=10 , give_loss=lowerCamelCase_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 664 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Any = "upernet"
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=5_12 , lowerCamelCase_=0.02 , lowerCamelCase_=[1, 2, 3, 6] , lowerCamelCase_=True , lowerCamelCase_=0.4 , lowerCamelCase_=3_84 , lowerCamelCase_=2_56 , lowerCamelCase_=1 , lowerCamelCase_=False , lowerCamelCase_=2_55 , **lowerCamelCase_ , ) -> Union[str, Any]:
super().__init__(**lowerCamelCase_ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowerCAmelCase__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = backbone_config.get('''model_type''' )
lowerCAmelCase__ = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase__ = config_class.from_dict(lowerCamelCase_ )
lowerCAmelCase__ = backbone_config
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = pool_scales
lowerCAmelCase__ = use_auxiliary_head
lowerCAmelCase__ = auxiliary_loss_weight
lowerCAmelCase__ = auxiliary_in_channels
lowerCAmelCase__ = auxiliary_channels
lowerCAmelCase__ = auxiliary_num_convs
lowerCAmelCase__ = auxiliary_concat_input
lowerCAmelCase__ = loss_ignore_index
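# `backbone_config` may also arrive as a plain dict (e.g. parsed from a saved
# config.json); the isinstance branch above rebuilds the concrete config class
# from the dict's "model_type" key before storing it.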
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.backbone_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output
| 90 |
'''simple docstring'''
import torch
from transformers import AutoModel
class UpperCamelCase_ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Any , _lowerCamelCase : Optional[int]="sayef/fsner-bert-base-uncased" ) -> List[Any]:
super(_lowerCamelCase , self ).__init__()
__magic_name__ = AutoModel.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase )
__magic_name__ = torch.nn.CosineSimilarity(3 , 1e-08 )
__magic_name__ = torch.nn.Softmax(dim=1 )
def __A ( self : Tuple , **_lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return self.bert(**_lowerCamelCase ).last_hidden_state
def __A ( self : Dict , _lowerCamelCase : Dict ) -> Dict:
return token_embeddings.sum(2 , keepdim=_lowerCamelCase )
def __A ( self : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Tuple=1 ) -> Optional[Any]:
return self.softmax(T * self.cos(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] ) -> List[str]:
__magic_name__ = W_supports["sizes"].tolist()
__magic_name__ = W_supports["start_token_id"].item()
__magic_name__ = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__magic_name__ = self.BERT(**_lowerCamelCase )
__magic_name__ = self.BERT(**_lowerCamelCase )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = W_supports["input_ids"] == start_token_id
__magic_name__ = W_supports["input_ids"] == end_token_id
for i, size in enumerate(_lowerCamelCase ):
if i == 0:
__magic_name__ = 0
else:
__magic_name__ = support_sizes[i - 1]
__magic_name__ = S[s : s + size][start_token_masks[s : s + size]]
__magic_name__ = S[s : s + size][end_token_masks[s : s + size]]
__magic_name__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
__magic_name__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__magic_name__ = torch.vstack((p_starts, p_start) )
__magic_name__ = torch.vstack((p_ends, p_end) )
else:
__magic_name__ = p_start
__magic_name__ = p_end
return p_starts, p_ends
| 664 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(snake_case__ ) )
if txt[a].isalpha()
]
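# Worked example (sketch): for the input "st" the comprehension yields
# ["St", "sT"]; non-alphabetic positions are skipped by the isalpha() guard.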
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 91 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
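# Recommended replacement import, per the deprecation message above:
#   from diffusers import FlaxStableDiffusionControlNetPipeline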
| 664 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config( PretrainedConfig ):
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
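# A minimal usage sketch for the config above (hypothetical values):
#   config = Speech2Text2Config(vocab_size=5000, d_model=128)
#   assert config.model_type == "speech_to_text_2"
#   assert config.hidden_size == 128  # resolved through attribute_map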
| 92 |
'''simple docstring'''
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax( t5x_checkpoint_path , config_name , flax_dump_folder_path ):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
__magic_name__ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
__magic_name__ = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__magic_name__ = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = "TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
__magic_name__ = F'layers_{str(lowerCamelCase_ )}'
# Self-Attention
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
__magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__magic_name__ = flax_model.params["encoder"]["block"][str(lowerCamelCase_ )]["layer"]
__magic_name__ = tax_attention_key
__magic_name__ = tax_attention_out
__magic_name__ = tax_attention_query
__magic_name__ = tax_attention_value
__magic_name__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_global_layer_norm
if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_0
            __magic_name__ = tax_mlp_wi_1
else:
__magic_name__ = tax_mlp_wi
__magic_name__ = tax_mlp_wo
__magic_name__ = tax_mlp_layer_norm
__magic_name__ = flax_model_encoder_layer_block
# Only for layer 0:
__magic_name__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_encoder_global_rel_embedding
# Assigning
__magic_name__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
__magic_name__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__magic_name__ = F'layers_{str(lowerCamelCase_ )}'
# Self-Attention
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
__magic_name__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
__magic_name__ = tax_enc_dec_attention_module["key"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["out"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["query"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__magic_name__ = flax_model.params["decoder"]["block"][str(lowerCamelCase_ )]["layer"]
__magic_name__ = tax_attention_key
__magic_name__ = tax_attention_out
__magic_name__ = tax_attention_query
__magic_name__ = tax_attention_value
__magic_name__ = tax_pre_attention_layer_norm
__magic_name__ = tax_enc_dec_attention_key
__magic_name__ = tax_enc_dec_attention_out
__magic_name__ = tax_enc_dec_attention_query
__magic_name__ = tax_enc_dec_attention_value
__magic_name__ = tax_cross_layer_norm
if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_0
            __magic_name__ = tax_mlp_wi_1
else:
__magic_name__ = tax_mlp_wi
__magic_name__ = tax_mlp_wo
        __magic_name__ = tax_mlp_layer_norm
__magic_name__ = flax_model_decoder_layer_block
# Decoder Normalization
__magic_name__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
__magic_name__ = txa_decoder_norm
# Only for layer 0:
__magic_name__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_decoder_rel_embedding
# Token Embeddings
__magic_name__ = tax_model["target"]["token_embedder"]["embedding"]
__magic_name__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__magic_name__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(lowerCamelCase_ )
    print("T5X Model was successfully converted!" )
if __name__ == "__main__":
__magic_name__ : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__magic_name__ : Optional[int] =parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
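# Example invocation (script name and paths are illustrative):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /tmp/flax_model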
| 664 | 0 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = """https://openaipublic.azureedge.net/jukebox/models/"""
__A = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key( key ):
"""simple docstring"""
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
lowerCAmelCase__ :int = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
lowerCAmelCase__ :Dict = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
lowerCAmelCase__ :Any = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
lowerCAmelCase__ :Optional[int] = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
lowerCAmelCase__ :List[Any] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
lowerCAmelCase__ :Dict = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCAmelCase__ :Any = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
lowerCAmelCase__ :List[str] = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
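# Illustrative inputs/outputs for replace_key, traced through the rules above:
#   "prime_state_ln"        -> "encoder.final_layer_norm"
#   "vqvae.bottleneck.0.k"  -> "vqvae.bottleneck.0.codebook"
#   "foo.y_emb.bar"         -> "foo.metadata_embedding.bar"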
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    """simple docstring"""
    new_dict = {}
import re
lowerCAmelCase__ :str = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
lowerCAmelCase__ :List[Any] = re.compile(
r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
lowerCAmelCase__ :Optional[Any] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
lowerCAmelCase__ :Tuple = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
lowerCAmelCase__ :Union[str, Any] = re.compile(
r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
lowerCAmelCase__ :Any = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
lowerCAmelCase__ :Optional[Any] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
lowerCAmelCase__ :Any = re.compile(
r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
lowerCAmelCase__ :int = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = re_encoder_block_conv_in.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = regex_match.groups()
lowerCAmelCase__ :int = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase__ :Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
lowerCAmelCase__ :str = re_encoder_block_conv_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = re_encoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = regex_match.groups()
lowerCAmelCase__ :int = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase__ :Union[str, Any] = {'1': 1, '3': 2}[groups[-2]]
lowerCAmelCase__ :int = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
lowerCAmelCase__ :Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
lowerCAmelCase__ :List[Any] = prefix + resnet_block
lowerCAmelCase__ :List[Any] = re_encoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_encoder_block_proj_out.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = re_encoder_block_proj_out.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = regex_match.groups()
lowerCAmelCase__ :Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
lowerCAmelCase__ :Optional[Any] = re_encoder_block_proj_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = re_decoder_block_conv_out.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = regex_match.groups()
lowerCAmelCase__ :Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase__ :Union[str, Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
lowerCAmelCase__ :List[str] = re_decoder_block_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :List[Any] = re_decoder_block_resnet.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = regex_match.groups()
lowerCAmelCase__ :Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase__ :Dict = {'1': 1, '3': 2}[groups[-2]]
lowerCAmelCase__ :Any = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
lowerCAmelCase__ :Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
lowerCAmelCase__ :Union[str, Any] = prefix + resnet_block
lowerCAmelCase__ :List[Any] = re_decoder_block_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_decoder_block_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = re_decoder_block_proj_in.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = regex_match.groups()
lowerCAmelCase__ :Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
lowerCAmelCase__ :Any = re_decoder_block_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Dict = re_prior_cond_conv_out.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = regex_match.groups()
lowerCAmelCase__ :Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase__ :Optional[int] = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
lowerCAmelCase__ :Union[str, Any] = re_prior_cond_conv_out.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_resnet.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Tuple = re_prior_cond_resnet.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = regex_match.groups()
lowerCAmelCase__ :Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase__ :str = {'1': 1, '3': 2}[groups[-2]]
lowerCAmelCase__ :Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
lowerCAmelCase__ :List[str] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
lowerCAmelCase__ :str = prefix + resnet_block
lowerCAmelCase__ :Tuple = re_prior_cond_resnet.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif re_prior_cond_proj_in.fullmatch(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Any = re_prior_cond_proj_in.match(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = regex_match.groups()
lowerCAmelCase__ :List[str] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
lowerCAmelCase__ :str = re_prior_cond_proj_in.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
lowerCAmelCase__ :str = requests.get(F"{PREFIX}{file}" , allow_redirects=_SCREAMING_SNAKE_CASE )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_SCREAMING_SNAKE_CASE )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
lowerCAmelCase__ :List[str] = MODEL_MAPPING[model_name.split('/' )[-1]]
lowerCAmelCase__ :List[Any] = JukeboxConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Union[str, Any] = JukeboxModel(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = []
lowerCAmelCase__ :Optional[int] = {}
for i, dict_name in enumerate(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[Any] = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
lowerCAmelCase__ :List[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
lowerCAmelCase__ :Dict = old_dic[k]
elif k.endswith('.w' ):
lowerCAmelCase__ :Optional[int] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCAmelCase__ :Dict = old_dic[k]
else:
lowerCAmelCase__ :List[str] = old_dic[k]
lowerCAmelCase__ :Union[str, Any] = 'vqvae' if i == 0 else F"priors.{3 - i}"
lowerCAmelCase__ :Optional[Any] = fix_jukebox_keys(_SCREAMING_SNAKE_CASE , model.state_dict() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
weight_dict.append(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Union[str, Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
return weight_dict
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
__A = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
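# Example invocation (script name is illustrative; defaults shown above):
#   python convert_jukebox.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted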
| 93 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester( unittest.TestCase , ToolTesterMixin ):
"""simple docstring"""
    def setUp( self ):
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()
    def test_exact_match_arg( self ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
    def test_exact_match_kwarg( self ):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 664 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text : str ) -> None:
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(" " + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
# two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text( text : str ) -> tuple[dict, dict]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
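# Illustration of the helper above: analyze_text("ab") returns the pair
#   (Counter({'b': 1, 'a': 1}), Counter({' a': 1, 'ab': 1}))
# i.e. every character is counted once, plus a leading-space sentinel
# bigram before the first character.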
def main() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 94 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens : List[str] ):
    '''simple docstring'''
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code : str ):
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
"""simple docstring"""
    def __init__( self , * , duplication_jaccard_threshold : float = 0.85 ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key : Tuple , min_hash : MinHash ) -> None:
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'Duplicate key {code_key}' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    '''simple docstring'''
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator : Type[Dataset] ):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=1_0000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_a : str , code_b : str ):
    '''simple docstring'''
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
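# Quick check of the helper above: tokens are split on [^A-Za-z_0-9], so
#   jaccard_similarity("a b c", "b c d") == 2 / 4 == 0.5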
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F'Original dataset size: {len(dataset )}' )
    print(F'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(F'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(F'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(F'Filtered dataset size: {len(ds_filter )}' )
    return ds_filter, duplicate_clusters
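# Typical use: build a Dataset whose rows carry "content", "repo_name" and
# "path" fields, then drop near-duplicate files (illustrative call):
#   ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)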
| 664 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem ):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , fo : str = "" , target_protocol : Optional[str] = None , target_options : Optional[dict] = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="rb" , protocol=target_protocol , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("::" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("/" )
    def _get_dirs( self ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat( self , path : str ):
        return self.file.open().read()
    def _open( self , path : str , mode : str = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem ):
    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'
class GzipFileSystem(BaseCompressedFileFileSystem ):
    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'
class Lz4FileSystem(BaseCompressedFileFileSystem ):
    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'
class XzFileSystem(BaseCompressedFileFileSystem ):
    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'
class ZstdFileSystem(BaseCompressedFileFileSystem ):
    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'
    def __init__( self , fo : str , mode : str = "rb" , target_protocol : Optional[str] = None , target_options : Optional[dict] = None , block_size : int = DEFAULT_BLOCK_SIZE , **kwargs , ):
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            def __init__( self , file_ ):
                self._file = file_
def __enter__( self : Tuple ) -> List[Any]:
self._file.__enter__()
return self
            def __exit__( self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )
def __iter__( self : Optional[int] ) -> int:
return iter(self._file )
            def __next__( self ):
                return next(self._file )
def __getattr__( self : Optional[int] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
return getattr(self._file , lowerCAmelCase_ )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter
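# Usage sketch (hypothetical path; assumes these classes are registered with
# fsspec under their `protocol` names, as `datasets` does at import time):
#   import fsspec
#   fs = fsspec.filesystem("gzip", fo="./data.txt.gz")
#   name = fs.ls("/")[0]      # single entry: the decompressed payload
#   data = fs.cat_file(name)  # read the decompressed bytes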
| 95 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__magic_name__ : Optional[int] =logging.get_logger(__name__)
__magic_name__ : Tuple ='The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
'''simple docstring'''
__magic_name__ = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
__magic_name__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__magic_name__ = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCamelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__magic_name__ = os.path.join(get_home_dir() , "models" )
__magic_name__ = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
__magic_name__ = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
__magic_name__ = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowerCamelCase_ ),
}
__magic_name__ = BertConfig.from_dict(lowerCamelCase_ )
__magic_name__ = BertForMaskedLM(lowerCamelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array : Any ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__magic_name__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__magic_name__ = hf_bort_model.bert.encoder.layer[i]
# self attention
__magic_name__ = layer.attention.self
__magic_name__ = check_and_map_params(
self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__magic_name__ = check_and_map_params(
self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__magic_name__ = check_and_map_params(
self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__magic_name__ = check_and_map_params(
self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__magic_name__ = check_and_map_params(
self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__magic_name__ = check_and_map_params(
self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__magic_name__ = layer.attention.output
__magic_name__ = check_and_map_params(
self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
__magic_name__ = check_and_map_params(
self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__magic_name__ = layer.intermediate
__magic_name__ = check_and_map_params(
intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__magic_name__ = check_and_map_params(
intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__magic_name__ = layer.output
__magic_name__ = check_and_map_params(
bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__magic_name__ = check_and_map_params(
bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__magic_name__ = RobertaTokenizer.from_pretrained("roberta-base" )
__magic_name__ = tokenizer.encode_plus(lowerCamelCase_ )["input_ids"]
# Get gluon output
__magic_name__ = mx.nd.array([input_ids] )
__magic_name__ = original_bort(inputs=lowerCamelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase_ )
__magic_name__ = BertModel.from_pretrained(lowerCamelCase_ )
hf_bort_model.eval()
__magic_name__ = tokenizer.encode_plus(lowerCamelCase_ , return_tensors="pt" )
__magic_name__ = hf_bort_model(**lowerCamelCase_ )[0]
__magic_name__ = output_gluon[0].asnumpy()
__magic_name__ = output_hf[0].detach().numpy()
__magic_name__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__magic_name__ = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if success:
        print("✔️ Both models output the same tensors" )
    else:
        print("❌ Both models do **NOT** output the same tensors" )
print("Absolute difference is:" , lowerCamelCase_ )
if __name__ == "__main__":
__magic_name__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--bort_checkpoint_path', default=None, type=str, required=True, help='Path to the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ : Optional[Any] =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
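# Example invocation (script name and paths are illustrative):
#   python convert_bort_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /tmp/bort.params \
#       --pytorch_dump_folder_path /tmp/bort-pytorch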
| 664 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__lowerCamelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
__magic_name__: Union[str, Any] = []
__magic_name__: Tuple = fairseq_model.state_dict()
__magic_name__: List[str] = hf_model.feature_extractor
__magic_name__: str = hf_model.adapter
for name, value in fairseq_dict.items():
__magic_name__: str = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__magic_name__: List[Any] = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__magic_name__: Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__magic_name__: str = True
if "*" in mapped_key:
__magic_name__: Dict = name.split(__UpperCAmelCase )[0].split(""".""" )[-2]
__magic_name__: Optional[int] = mapped_key.replace("""*""" , __UpperCAmelCase )
if "weight_g" in name:
__magic_name__: Optional[Any] = """weight_g"""
elif "weight_v" in name:
__magic_name__: Dict = """weight_v"""
elif "bias" in name:
__magic_name__: List[Any] = """bias"""
elif "weight" in name:
__magic_name__: int = """weight"""
else:
__magic_name__: Optional[Any] = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
__magic_name__: List[Any] = full_name.split("""conv_layers.""" )[-1]
__magic_name__: str = name.split(""".""" )
__magic_name__: List[Any] = int(items[0] )
__magic_name__: int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__magic_name__: Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__magic_name__: Any = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__magic_name__: int = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__magic_name__: Tuple = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
def load_adapter( full_name , value , adapter , unused_weights ):
__magic_name__: Dict = full_name.split("""adaptor.""" )[-1]
__magic_name__: Dict = name.split(""".""" )
if items[1].isdigit():
__magic_name__: Optional[int] = int(items[1] )
else:
__magic_name__: Any = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
__magic_name__: str = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
__magic_name__: Tuple = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
__magic_name__: Dict = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
__magic_name__: Optional[int] = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
__magic_name__: int = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
__magic_name__: Optional[Any] = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCAmelCase )
def make_linear_from_emb( emb : nn.Embedding ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
__magic_name__: Optional[int] = WavaVecaConfig.from_pretrained(
__UpperCAmelCase , add_adapter=__UpperCAmelCase , adapter_stride=__UpperCAmelCase , adapter_kernel_size=__UpperCAmelCase , use_auth_token=__UpperCAmelCase , output_hidden_size=__UpperCAmelCase , )
__magic_name__: Optional[int] = MBartConfig.from_pretrained(__UpperCAmelCase )
# load model
__magic_name__, __magic_name__, __magic_name__: str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
__magic_name__: Tuple = model[0].eval()
# load feature extractor
__magic_name__: Tuple = WavaVecaFeatureExtractor.from_pretrained(__UpperCAmelCase , use_auth_token=__UpperCAmelCase )
# set weights for wav2vec2 encoder
__magic_name__: int = WavaVecaModel(__UpperCAmelCase )
recursively_load_weights_wavaveca(model.encoder , __UpperCAmelCase )
# load decoder weights
__magic_name__: Optional[Any] = MBartForCausalLM(__UpperCAmelCase )
__magic_name__, __magic_name__: Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__UpperCAmelCase )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
__magic_name__: List[str] = SpeechEncoderDecoderModel(encoder=__UpperCAmelCase , decoder=__UpperCAmelCase )
__magic_name__: Optional[int] = False
__magic_name__: Union[str, Any] = MBartaaTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
__magic_name__: List[Any] = hf_wavavec.config.to_dict()
__magic_name__: Any = tokenizer.pad_token_id
__magic_name__: Union[str, Any] = tokenizer.bos_token_id
__magic_name__: Optional[int] = tokenizer.eos_token_id
__magic_name__: int = """mbart50"""
__magic_name__: int = """wav2vec2"""
__magic_name__: str = tokenizer.eos_token_id
__magic_name__: Tuple = 2_5_0_0_0_4
__magic_name__: Optional[int] = tokenizer.eos_token_id
__magic_name__: List[Any] = SpeechEncoderDecoderConfig.from_dict(__UpperCAmelCase )
hf_wavavec.save_pretrained(__UpperCAmelCase )
feature_extractor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=10_24, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_00_04, type=int, help='`decoder_start_token_id` of model config')
__lowerCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
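# Usage sketch (not part of the original script; the file name and paths below are
# placeholders for wherever this converter and the fairseq artifacts live on disk):
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.mbart50.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50
#
# The remaining flags keep their defaults above (XLS-R 1B encoder config,
# mBART-50 decoder config, adapter kernel size 3, stride 2).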
| 96 |
'''simple docstring'''
def __snake_case ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
__magic_name__ = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__magic_name__ = str(bin(lowerCamelCase_ ) )[2:] # remove the leading "0b"
__magic_name__ = max(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase_ ) , b_binary.zfill(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.6_5 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.6_5 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def a ( snake_case__: Union[str, Any] ):
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
lowercase_ , lowercase_ , lowercase_ = get_training_setup(snake_case__ )
# Use a single batch
lowercase_ , lowercase_ = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase_ = ddp_input[torch.randperm(len(snake_case__ ) )]
def a ( snake_case__: Optional[Any] ):
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ = get_training_setup(snake_case__ )
# Use a single batch
lowercase_ , lowercase_ = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase_ = ddp_input[torch.randperm(len(snake_case__ ) )]
def a ( snake_case__: int=False , snake_case__: Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ = get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
lowercase_ , lowercase_ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase_ = ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
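# The pattern exercised above, reduced to its minimal form (a sketch;
# `compute_loss` is a hypothetical helper, not defined in this file):
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = compute_loss(model, batch)
#           accelerator.backward(loss)   # grads only sync on the last micro-batch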
def a ( snake_case__: Optional[int]=False , snake_case__: List[str]=False ):
'''simple docstring'''
lowercase_ = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
lowercase_ , lowercase_ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
lowercase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def a ( ):
'''simple docstring'''
lowercase_ = Accelerator()
lowercase_ = RegressionDataset(length=80 )
lowercase_ = DataLoader(snake_case__ , batch_size=16 )
lowercase_ = RegressionDataset(length=96 )
lowercase_ = DataLoader(snake_case__ , batch_size=16 )
lowercase_ , lowercase_ = accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def a ( ):
'''simple docstring'''
lowercase_ = Accelerator()
lowercase_ = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(snake_case__ , snake_case__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def a ( snake_case__: List[str] ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
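# Usage note (an assumption about how this test module is meant to be run):
# launching it under multiple processes, e.g. `accelerate launch <this_file>.py`,
# exercises the distributed `no_sync` / `accumulate` branches above; a plain
# `python <this_file>.py` only hits the single-process (DistributedType.NO) paths.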
| 97 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = os.getenv("TRANSFORMERS_VERBOSITY" , lowerCamelCase_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def __snake_case ( ):
'''simple docstring'''
return __name__.split("." )[0]
def __snake_case ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def __snake_case ( ):
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def __snake_case ( ):
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def __snake_case ( ):
'''simple docstring'''
return log_levels
def __snake_case ( name : Optional[str] = None ):
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def __snake_case ( lowerCamelCase_ : int ):
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
return set_verbosity(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def __snake_case ( lowerCamelCase_ : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(lowerCamelCase_ )
def __snake_case ( lowerCamelCase_ : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
    _get_library_root_logger().propagate = False
def __snake_case ( ):
'''simple docstring'''
_configure_library_root_logger()
    _get_library_root_logger().propagate = True
def __snake_case ( ):
'''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
        handler.setFormatter(formatter )
def __snake_case ( ):
'''simple docstring'''
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def __snake_case ( self : Union[str, Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : Any ):
'''simple docstring'''
__magic_name__ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , lowerCamelCase_ )
if no_advisory_warnings:
return
self.warning(*lowerCamelCase_ , **lowerCamelCase_ )
__magic_name__ : int =warning_advice
@functools.lru_cache(None )
def __snake_case ( self : Dict , *lowerCamelCase_ : int , **lowerCamelCase_ : int ):
'''simple docstring'''
self.warning(*lowerCamelCase_ , **lowerCamelCase_ )
__magic_name__ : Optional[int] =warning_once
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ) -> Any: # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self : int ) -> Tuple:
return iter(self._iterator )
def __getattr__( self : List[Any] , _lowerCamelCase : int ) -> List[Any]:
def empty_fn(*_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ) -> Any:
return self
def __exit__( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ) -> Dict:
return
class UpperCamelCase_ :
"""simple docstring"""
def __call__( self : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> List[Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*_lowerCamelCase , **_lowerCamelCase )
else:
return EmptyTqdm(*_lowerCamelCase , **_lowerCamelCase )
def __A ( self : Optional[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Dict ) -> Union[str, Any]:
        self._lock = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_lowerCamelCase , **_lowerCamelCase )
def __A ( self : str ) -> Any:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__magic_name__ : List[Any] =_tqdm_cls()
def __snake_case ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def __snake_case ( ):
'''simple docstring'''
global _tqdm_active
    _tqdm_active = True
hf_hub_utils.enable_progress_bars()
def __snake_case ( ):
'''simple docstring'''
global _tqdm_active
    _tqdm_active = False
hf_hub_utils.disable_progress_bars()
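# Downstream usage sketch (assuming this module is exposed as
# `transformers.utils.logging` with its upstream function names):
#
#   from transformers.utils import logging
#   logging.set_verbosity(logging.INFO)   # or: export TRANSFORMERS_VERBOSITY=info
#   logger = logging.get_logger(__name__)
#   logger.info("now visible at INFO verbosity")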
| 664 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : int = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : List[Any] = 'gpt_neox'
    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        # e.g. with the defaults above: 6144 hidden size / 64 heads = 96 dims per head
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
    def _rope_scaling_validation( self ) -> None:
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 98 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ : Union[str, Any] ={'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : str =[
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
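# Note: `_LazyModule` defers the imports declared in `_import_structure` until an
# attribute is first accessed, so importing the package stays cheap and optional
# backends (here: torch) are only required when their symbols are actually used.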
| 664 | 0 |
from __future__ import annotations
from typing import Any
class Matrix :
    """simple docstring"""

    def __init__( self , row , column , default_value = 0 ):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]

    def __str__( self ):
        s = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'''%{max_element_length}s'''

        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = """["""
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line

        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s

    def __repr__( self ):
        return str(self )

    def validate_indicies( self , loc ):
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self , loc ):
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]

    def __setitem__( self , loc , value ):
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value

    def __add__( self , another ):
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__( self ):
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result

    def __sub__( self , another ):
        return self + (-another)

    def __mul__( self , another ):
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )

    def transpose( self ):
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison( self , u , v ):
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
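# Note on `sherman_morrison` above: with `self` holding A^(-1), it applies
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)
# so the rank-1 update is inverted in O(n^2) work instead of a fresh O(n^3) inversion.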
# Testing
if __name__ == "__main__":
    def testa():
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''' )
        print(f'''v is {v}''' )
        print(f'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
def a ():
import doctest
doctest.testmod()
testa()
| 99 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : Optional[Any] ={
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int =['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict =[
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Tuple =[
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__magic_name__ : int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 0 |
import numpy as np
import datasets
_A : Union[str, Any] = """
Compute the Mahalanobis Distance
Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_A : Optional[Any] = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_A : Union[str, Any] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
} ) , )
    def _compute( self , X , reference_distribution ):
        '''simple docstring'''
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 100 |
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__magic_name__ : str ={
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__magic_name__ : Tuple ={
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
__magic_name__ = (images / 2 + 0.5).clamp(0 , 1 )
__magic_name__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__magic_name__ = numpy_to_pil(lowerCamelCase_ )
return images
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
if images.ndim == 3:
__magic_name__ = images[None, ...]
__magic_name__ = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
__magic_name__ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
__magic_name__ = [Image.fromarray(lowerCamelCase_ ) for image in images]
return pil_images
| 664 | 0 |
def match_pattern(input_string: str , pattern: str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # since a pattern of zero length will never match a string of non-zero length
    for i in range(1, len_string ):
        dp[i][0] = 0
    # since a string of zero length will match a pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string ):
        for j in range(1, len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
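# Complexity note: the table gives O(len(input_string) * len(pattern)) time and
# space. Quick trace for the demo below: "c*" matches the empty prefix of "aab",
# "a*" consumes "aa", and "b" matches "b", so match_pattern("aab", "c*a*b") is True.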
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowerCAmelCase__ : List[Any] ='aab'
lowerCAmelCase__ : Dict ='c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 101 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__magic_name__ : Optional[Any] =logging.get_logger(__name__)
@add_end_docstrings(
A , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __A ( self : Any , _lowerCamelCase : GenericTensor ) -> np.ndarray:
if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
else:
raise ValueError("Unsupported framework" )
return masked_index
def __A ( self : str , _lowerCamelCase : GenericTensor ) -> np.ndarray:
        masked_index = self.get_masked_index(_lowerCamelCase )
        numel = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __A ( self : int , _lowerCamelCase : GenericTensor ) -> Any:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowerCamelCase )
    def __A ( self , inputs , return_tensors=None , **preprocess_parameters ) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
def __A ( self : List[str] , _lowerCamelCase : int ) -> List[Any]:
        model_outputs = self.model(**_lowerCamelCase )
        model_outputs["input_ids"] = _lowerCamelCase["input_ids"]
return model_outputs
def __A ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Dict=None ) -> Dict:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__magic_name__ = target_ids.shape[0]
__magic_name__ = model_outputs["input_ids"][0]
__magic_name__ = model_outputs["logits"]
if self.framework == "tf":
__magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__magic_name__ = outputs.numpy()
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = stable_softmax(_lowerCamelCase , axis=-1 )
if target_ids is not None:
__magic_name__ = tf.gather_nd(tf.squeeze(_lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
__magic_name__ = tf.expand_dims(_lowerCamelCase , 0 )
__magic_name__ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase )
__magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy()
else:
__magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__magic_name__ = outputs[0, masked_index, :]
__magic_name__ = logits.softmax(dim=-1 )
if target_ids is not None:
__magic_name__ = probs[..., target_ids]
__magic_name__ , __magic_name__ = probs.topk(_lowerCamelCase )
__magic_name__ = []
__magic_name__ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__magic_name__ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__magic_name__ = input_ids.numpy().copy()
if target_ids is not None:
__magic_name__ = target_ids[p].tolist()
__magic_name__ = p
# Filter padding out:
__magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__magic_name__ = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
__magic_name__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(_lowerCamelCase )
result.append(_lowerCamelCase )
if single_mask:
return result[0]
return result
def __A ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ) -> List[str]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = [targets]
try:
__magic_name__ = self.tokenizer.get_vocab()
except Exception:
__magic_name__ = {}
__magic_name__ = []
for target in targets:
__magic_name__ = vocab.get(_lowerCamelCase , _lowerCamelCase )
if id_ is None:
__magic_name__ = self.tokenizer(
_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , max_length=1 , truncation=_lowerCamelCase , )["input_ids"]
if len(_lowerCamelCase ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"We cannot replace it with anything meaningful, ignoring it" )
continue
__magic_name__ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__magic_name__ = list(set(_lowerCamelCase ) )
if len(_lowerCamelCase ) == 0:
raise ValueError("At least one target must be provided when passed." )
__magic_name__ = np.array(_lowerCamelCase )
return target_ids
def __A ( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : int=None ) -> Tuple:
__magic_name__ = {}
if targets is not None:
__magic_name__ = self.get_target_ids(_lowerCamelCase , _lowerCamelCase )
__magic_name__ = target_ids
if top_k is not None:
__magic_name__ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : int , _lowerCamelCase : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]:
__magic_name__ = super().__call__(_lowerCamelCase , **_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1:
return outputs[0]
return outputs
| 664 | 0 |
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days , absent , late ):
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days = 30 ):
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
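# For the default 30-day period this counts 1918080160 valid prize strings,
# which is the known answer to Project Euler problem 191.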
| 102 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int] , k: int ):
    '''simple docstring'''
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
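# Worked example of the sliding window above: for array=[1, 2, 3, 4] and k=2 the
# window sums are 3, 5, 7, so max_sum_in_array([1, 2, 3, 4], 2) returns 7.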
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__magic_name__ : List[str] =[randint(-10_00, 10_00) for i in range(1_00)]
__magic_name__ : List[str] =randint(0, 1_10)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 664 | 0 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs( input_types: List ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('''Text input''' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs
def output_types( outputs: List ) -> List[str]:
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append('''text''' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append('''image''' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append('''audio''' )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types
@is_tool_test
class UpperCAmelCase :
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
        inputs = self.tool.inputs
for _input in inputs:
            if isinstance(_input , list ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 103 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : int =logging.get_logger(__name__)
__magic_name__ : List[Any] ={}
class UpperCamelCase_ ( A ):
"""simple docstring"""
UpperCAmelCase__ : int = '''llama'''
UpperCAmelCase__ : Any = ['''past_key_values''']
def __init__( self : List[Any] , _lowerCamelCase : List[Any]=3_20_00 , _lowerCamelCase : Optional[Any]=40_96 , _lowerCamelCase : Tuple=1_10_08 , _lowerCamelCase : List[Any]=32 , _lowerCamelCase : Tuple=32 , _lowerCamelCase : List[str]=None , _lowerCamelCase : str="silu" , _lowerCamelCase : Optional[Any]=20_48 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : Union[str, Any]=1e-6 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Dict=0 , _lowerCamelCase : int=1 , _lowerCamelCase : str=2 , _lowerCamelCase : List[Any]=1 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : List[str]=None , **_lowerCamelCase : List[Any] , ) -> Any:
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = hidden_size
__magic_name__ = intermediate_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__magic_name__ = num_attention_heads
__magic_name__ = num_key_value_heads
__magic_name__ = hidden_act
__magic_name__ = initializer_range
__magic_name__ = rms_norm_eps
__magic_name__ = pretraining_tp
__magic_name__ = use_cache
__magic_name__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase , )
def __A ( self : Union[str, Any] ) -> List[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'got {self.rope_scaling}' )
__magic_name__ = self.rope_scaling.get("type" , _lowerCamelCase )
__magic_name__ = self.rope_scaling.get("factor" , _lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase , _lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
| 664 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCamelCase__ :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
A__ = TFRoFormerModel(config=SCREAMING_SNAKE_CASE__ )
A__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A__ = [input_ids, input_mask]
A__ = model(SCREAMING_SNAKE_CASE__ )
A__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
A__ = True
A__ = TFRoFormerForCausalLM(config=SCREAMING_SNAKE_CASE__ )
A__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A__ = model(SCREAMING_SNAKE_CASE__ )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
A__ = TFRoFormerForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
A__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
A__ = self.num_labels
A__ = TFRoFormerForSequenceClassification(config=SCREAMING_SNAKE_CASE__ )
A__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
A__ = self.num_choices
A__ = TFRoFormerForMultipleChoice(config=SCREAMING_SNAKE_CASE__ )
A__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE__ , 1 ) , (1, self.num_choices, 1) )
A__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
A__ = self.num_labels
A__ = TFRoFormerForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
A__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
A__ = TFRoFormerForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ )
A__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
A__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
A__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
A__ : List[Any] = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ : int = False
A__ : List[str] = False
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def snake_case__ ( self ) -> Tuple:
A__ = TFRoFormerModelTester(self )
A__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def snake_case__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> int:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> List[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Tuple:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Any:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def snake_case__ ( self ) -> Dict:
A__ = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self ) -> Optional[int]:
A__ = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(SCREAMING_SNAKE_CASE__ )[0]
# TODO Replace vocab size
A__ = 50000
A__ = [1, 6, vocab_size]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
A__ = tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = 1e-4
def snake_case__ ( self ) -> Optional[Any]:
A__ = tf.constant([[4, 10]] )
A__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
A__ = emba(input_ids.shape )
A__ = tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=self.tolerance )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
A__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
A__ = emba.weight[:3, :5]
tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=self.tolerance )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
A__ : str = 1e-4
def snake_case__ ( self ) -> List[Any]:
# 2,12,16,64
A__ = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
A__ = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
A__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
A__ = embed_positions([2, 16, 768] )[None, None, :, :]
A__ , A__ = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
A__ = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , SCREAMING_SNAKE_CASE__ , atol=self.tolerance )
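# NumPy sketch of the rotary update exercised by the test above, assuming
# RoFormer's layout (sin values in the first half of the last axis, cos in
# the second half); all names here are illustrative.
import numpy as np

def apply_rotary(x, sinusoidal_pos):
    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = np.repeat(sin, 2, axis=-1)  # [s0, s0, s1, s1, ...]
    cos_pos = np.repeat(cos, 2, axis=-1)  # [c0, c0, c1, c1, ...]
    # rotate_half: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
    rotated = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotated * sin_pos

# With sin = 0 and cos = 1 the rotation is the identity:
q = np.arange(8, dtype=np.float64).reshape(1, 1, 1, 8) / 100
pos = np.concatenate([np.zeros((1, 1, 1, 4)), np.ones((1, 1, 1, 4))], axis=-1)
assert np.allclose(apply_rotary(q, pos), q)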
| 104 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT : float = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def pressure_of_gas_system ( moles : float , kelvin : float , volume : float ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system ( moles : float , kelvin : float , pressure : float ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
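# Usage sketch of the helpers above; 1 mol of an ideal gas at 273.15 K in
# 0.0224 m^3 sits at roughly one atmosphere (values approximate).
print(pressure_of_gas_system(1.0, 273.15, 0.0224))  # ~101388 Pa
print(volume_of_gas_system(1.0, 273.15, 101325))    # ~0.0224 m^3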
| 664 | 0 |
from math import factorial, pi
def maclaurin_sin( theta : float , accuracy : int = 30 ) -> float:
    """simple docstring"""
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_sin() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos( theta : float , accuracy : int = 30 ) -> float:
    """simple docstring"""
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_cos() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
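# Quick sanity check: once theta is reduced modulo 2*pi, the truncated series
# above agree with math.sin / math.cos to well under 1e-9.
from math import cos, sin

assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
assert abs(maclaurin_cos(-5) - cos(-5)) < 1e-9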
| 105 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__magic_name__ : List[Any] =logging.getLogger(__name__)
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __init__( self : Optional[Any] , _lowerCamelCase : str=-1 ) -> List[str]:
        # in NER datasets, the last column is usually reserved for the NER label
__magic_name__ = label_idx
def __A ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = mode.value
__magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
__magic_name__ = 1
__magic_name__ = []
with open(_lowerCamelCase , encoding="utf-8" ) as f:
__magic_name__ = []
__magic_name__ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
__magic_name__ = []
__magic_name__ = []
else:
__magic_name__ = line.split(" " )
words.append(splits[0] )
if len(_lowerCamelCase ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
return examples
def __A ( self : Optional[Any] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Union[str, Any]:
__magic_name__ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(_lowerCamelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__magic_name__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(_lowerCamelCase )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __A ( self : Tuple , _lowerCamelCase : str ) -> List[str]:
if path:
with open(_lowerCamelCase , "r" ) as f:
__magic_name__ = f.read().splitlines()
if "O" not in labels:
__magic_name__ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __init__( self : int ) -> str:
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def __A ( self : int , _lowerCamelCase : str ) -> List[str]:
if path:
with open(_lowerCamelCase , "r" ) as f:
__magic_name__ = f.read().splitlines()
if "O" not in labels:
__magic_name__ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase_ ( A ):
"""simple docstring"""
def __A ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__magic_name__ = mode.value
__magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
__magic_name__ = 1
__magic_name__ = []
with open(_lowerCamelCase , encoding="utf-8" ) as f:
for sentence in parse_incr(_lowerCamelCase ):
__magic_name__ = []
__magic_name__ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
guid_index += 1
return examples
def __A ( self : Optional[int] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Any:
__magic_name__ = 0
for sentence in parse_incr(_lowerCamelCase ):
__magic_name__ = preds_list[example_id]
__magic_name__ = ""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(_lowerCamelCase )
example_id += 1
def __A ( self : Dict , _lowerCamelCase : str ) -> List[str]:
if path:
with open(_lowerCamelCase , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
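# Illustrative parse of the CoNLL-style input the readers above consume: one
# whitespace-separated token per line, blank lines between sentences, and the
# label picked by label_idx (here the last column, as in the NER task).
sample = "-DOCSTART- -X- -X- O\n\nEU NNP B-NP B-ORG\nrejects VBZ B-VP O\n"
sentences, words = [], []
for line in sample.splitlines():
    if line.startswith("-DOCSTART-") or line == "":
        if words:
            sentences.append(words)
            words = []
    else:
        splits = line.split(" ")
        words.append((splits[0], splits[-1]))
if words:
    sentences.append(words)
print(sentences)  # [[('EU', 'B-ORG'), ('rejects', 'O')]]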
| 664 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] =logging.get_logger(__name__)
__snake_case :List[Any] ={
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : Dict = 'nllb-moe'
A_ : Optional[int] = ['past_key_values']
A_ : Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : int , __UpperCamelCase : int=128_112 , __UpperCamelCase : str=1_024 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : str=4_096 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : List[str]=12 , __UpperCamelCase : Optional[Any]=4_096 , __UpperCamelCase : int=16 , __UpperCamelCase : str=0.0_5 , __UpperCamelCase : Optional[Any]=0.0_5 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Any="relu" , __UpperCamelCase : Any=1_024 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : int=0.0 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : str=2 , __UpperCamelCase : str=True , __UpperCamelCase : Dict=False , __UpperCamelCase : List[Any]="float32" , __UpperCamelCase : Tuple=False , __UpperCamelCase : Any=128 , __UpperCamelCase : Optional[int]=64 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=4 , __UpperCamelCase : int=0.0_0_1 , __UpperCamelCase : Dict=0.0_0_1 , __UpperCamelCase : Optional[Any]="all" , __UpperCamelCase : List[str]=False , __UpperCamelCase : Tuple=False , __UpperCamelCase : Any=1.0 , __UpperCamelCase : List[str]=0.2 , __UpperCamelCase : str=1 , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Dict=False , **__UpperCamelCase : Optional[Any] , ) -> Optional[int]:
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = router_z_loss_coef
A = router_aux_loss_coef
A = decoder_sparse_step
A = encoder_sparse_step
A = num_experts
A = expert_capacity
A = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
A = router_dtype
A = router_ignore_padding_tokens
A = batch_prioritized_routing
A = second_expert_policy
A = normalize_router_prob_before_dropping
A = moe_eval_capacity_token_fraction
A = moe_token_dropout
A = output_router_logits
super().__init__(
            pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , **__UpperCamelCase , )
| 106 |
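# Hedged usage sketch; NllbMoeConfig is the upstream name for the config
# class above (assumes a transformers release that ships the NLLB-MoE model).
from transformers import NllbMoeConfig

config = NllbMoeConfig(num_experts=8, expert_capacity=32, router_dtype="bfloat16")
print(config.num_experts, config.router_dtype)  # 8 bfloat16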
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0 ) -> None:
__magic_name__ , __magic_name__ = row, column
__magic_name__ = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )]
def __str__( self : Optional[Any] ) -> str:
__magic_name__ = f'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
__magic_name__ = 0
for row_vector in self.array:
for obj in row_vector:
__magic_name__ = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) )
__magic_name__ = f'%{max_element_length}s'
# Make string and return
def single_line(_lowerCamelCase : list[float] ) -> str:
nonlocal string_format_identifier
__magic_name__ = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
return str(self )
def __A ( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ) -> bool:
if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Optional[int] , _lowerCamelCase : tuple[int, int] ) -> Any:
assert self.validate_indicies(_lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Tuple , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : float ) -> None:
assert self.validate_indicies(_lowerCamelCase )
__magic_name__ = value
def __add__( self : Union[str, Any] , _lowerCamelCase : Matrix ) -> Matrix:
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = -self[r, c]
return result
def __sub__( self : Optional[int] , _lowerCamelCase : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : Optional[int] , _lowerCamelCase : int | float | Matrix ) -> Matrix:
if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication
__magic_name__ = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c] * another
return result
elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
__magic_name__ = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__magic_name__ = f'Unsupported type given for another ({type(_lowerCamelCase )})'
raise TypeError(_lowerCamelCase )
def __A ( self : Optional[int] ) -> Matrix:
__magic_name__ = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__magic_name__ = self[r, c]
return result
def __A ( self : int , _lowerCamelCase : Matrix , _lowerCamelCase : Matrix ) -> Any:
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__magic_name__ = v.transpose()
__magic_name__ = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __snake_case ( ):
'''simple docstring'''
    ainv = Matrix(3 , 3 , 0 )
    for i in range(3 ):
        ainv[i, i] = 1
    print(F'a^(-1) is {ainv}' )
    # u, v
    u = Matrix(3 , 1 , 0 )
    u[0, 0] , u[1, 0] , u[2, 0] = 1, 2, -3
    v = Matrix(3 , 1 , 0 )
    v[0, 0] , v[1, 0] , v[2, 0] = 4, -2, 5
    print(F'u is {u}' )
    print(F'v is {v}' )
    print(F'uv^T is {u * v.transpose()}' )
    # Sherman Morrison
    print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}' )
def __snake_case ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
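# NumPy cross-check of the Sherman-Morrison update above, using the same u and
# v as the demo: given ainv = A^(-1),
#   (A + u v^T)^(-1) = ainv - (ainv @ u) @ (v.T @ ainv) / (1 + v.T @ ainv @ u)
import numpy as np

ainv = np.eye(3)                      # A = I, so A^(-1) = I
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
denom = 1.0 + (v.T @ ainv @ u).item()
updated = ainv - (ainv @ u) @ (v.T @ ainv) / denom
assert np.allclose(updated, np.linalg.inv(np.eye(3) + u @ v.T))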
| 664 | 0 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class lowercase_ :
"""simple docstring"""
def __init__( self : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : Tuple=None ) -> Any:
_A = start
_A = end
_A = val
_A = (start + end) // 2
_A = left
_A = right
def __repr__( self : str ) -> Optional[int]:
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict, UpperCamelCase__ : Sequence, UpperCamelCase__ : Any ) -> Optional[Any]:
_A = collection
_A = function
if self.collection:
_A = self._build_tree(0, len(UpperCamelCase__ ) - 1 )
def __UpperCAmelCase ( self : Any, UpperCamelCase__ : str, UpperCamelCase__ : Dict ) -> int:
self._update_tree(self.root, UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
return self._query_range(self.root, UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : Any, UpperCamelCase__ : List[Any] ) -> str:
if start == end:
return SegmentTreeNode(UpperCamelCase__, UpperCamelCase__, self.collection[start] )
_A = (start + end) // 2
_A = self._build_tree(UpperCamelCase__, UpperCamelCase__ )
_A = self._build_tree(mid + 1, UpperCamelCase__ )
return SegmentTreeNode(UpperCamelCase__, UpperCamelCase__, self.fn(left.val, right.val ), UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int] ) -> Tuple:
if node.start == i and node.end == i:
_A = val
return
if i <= node.mid:
self._update_tree(node.left, UpperCamelCase__, UpperCamelCase__ )
else:
self._update_tree(node.right, UpperCamelCase__, UpperCamelCase__ )
_A = self.fn(node.left.val, node.right.val )
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple, UpperCamelCase__ : int ) -> Tuple:
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left, UpperCamelCase__, UpperCamelCase__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left, UpperCamelCase__, node.mid ), self._query_range(node.right, node.mid + 1, UpperCamelCase__ ), )
else:
# range in right child tree
return self._query_range(node.right, UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
if self.root is not None:
_A = Queue()
queue.put(self.root )
while not queue.empty():
_A = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
_UpperCAmelCase : Tuple = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 107 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
__magic_name__ : List[Any] =logging.getLogger(__name__)
__magic_name__ : int ='Hello world! cécé herlolip'
__magic_name__ : List[Any] =namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ):
'''simple docstring'''
__magic_name__ = BertAbsConfig(
temp_dir="." , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    __magic_name__ = torch.load(lowerCamelCase_ , lambda storage , loc : storage )
__magic_name__ = AbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) , lowerCamelCase_ )
original.eval()
__magic_name__ = BertAbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
__magic_name__ = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
__magic_name__ = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
__magic_name__ = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) )
__magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__magic_name__ = encoder_input_ids
__magic_name__ = decoder_input_ids
__magic_name__ = __magic_name__ = None
__magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = __magic_name__ = None
__magic_name__ = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__magic_name__ = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = original.generator(lowerCamelCase_ )
__magic_name__ = new_model(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0]
__magic_name__ = new_model.generator(lowerCamelCase_ )
__magic_name__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(lowerCamelCase_ ) )
__magic_name__ = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
__magic_name__ : Dict =argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
__magic_name__ : Any =parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
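# Hypothetical invocation of the conversion script above (script file name and
# paths are illustrative, not taken from the repository):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_original.pt \
#       --pytorch_dump_folder_path ./bertabs-converted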
| 664 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Optional[Any]:
assert isinstance(__snake_case , __snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> Optional[Any]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> Optional[int]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> Any:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case , split=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> Any:
if issubclass(__snake_case , __snake_case ):
_UpperCAmelCase = parquet_path
elif issubclass(__snake_case , __snake_case ):
_UpperCAmelCase = [parquet_path]
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case=("train",) ) -> List[str]:
assert isinstance(__snake_case , __snake_case )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> Tuple:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=__snake_case , keep_in_memory=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> Optional[Any]:
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader({"""train""": parquet_path} , features=__snake_case , cache_dir=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case ) -> List[Any]:
if split:
_UpperCAmelCase = {split: parquet_path}
else:
_UpperCAmelCase = """train"""
_UpperCAmelCase = {"""train""": parquet_path, """test""": parquet_path}
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = ParquetDatasetReader(__snake_case , cache_dir=__snake_case ).read()
_check_parquet_datasetdict(__snake_case , __snake_case , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Dict:
_UpperCAmelCase = ParquetDatasetWriter(__snake_case , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_UpperCAmelCase = pq.ParquetFile(tmp_path / """foo.parquet""" )
_UpperCAmelCase = pf.read()
assert dataset.data.table == output_table
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> List[Any]:
_UpperCAmelCase = str(shared_datadir / """test_image_rgb.jpg""" )
_UpperCAmelCase = {"""image""": [image_path]}
_UpperCAmelCase = Features({"""image""": Image()} )
_UpperCAmelCase = Dataset.from_dict(__snake_case , features=__snake_case )
_UpperCAmelCase = ParquetDatasetWriter(__snake_case , tmp_path / """foo.parquet""" )
assert writer.write() > 0
_UpperCAmelCase = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
_UpperCAmelCase = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__snake_case ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> List[Any]:
    assert get_writer_batch_size(__snake_case ) == expected
| 108 |
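# Minimal round trip of the machinery the tests above exercise: write a
# Dataset to Parquet, then load it back (creates foo.parquet in the cwd).
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ParquetDatasetWriter(ds, "foo.parquet").write()
reloaded = Dataset.from_parquet("foo.parquet")
assert reloaded.column_names == ["col_1", "col_2"]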
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ) -> str:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __A ( self : List[Any] ) -> str:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCamelCase ):
DisjunctiveConstraint(_lowerCamelCase ) # fails here
def __A ( self : List[Any] ) -> int:
__magic_name__ = [[1, 2, 3], [1, 2, 4]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
__magic_name__ = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(3 )
__magic_name__ = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __A ( self : Any ) -> Union[str, Any]:
__magic_name__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
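# Condensed walk-through of the behaviour tested above: the constraint is
# fulfilled by completing any single branch of the nested token lists.
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
print(dc.completed, dc.current_seq)  # True [1, 2, 4]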
| 664 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
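# The pattern above is what keeps `import transformers` itself cheap: the
# package replaces its module object with a _LazyModule, and heavy submodules
# are imported on first attribute access (sketch assumes torch is installed).
import transformers

print(type(transformers).__name__)      # _LazyModule
model_cls = transformers.Wav2Vec2Model  # modeling code is only imported here
print(model_cls.__name__)               # Wav2Vec2Model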
| 109 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__magic_name__ : Dict ={
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : Any ) -> Union[str, Any]:
__magic_name__ = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def __A ( cls : Any ) -> Tuple:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def __A ( self : Optional[Any] ) -> Dict:
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCamelCase , repo_id="test-config" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : str ) -> Optional[int]:
__magic_name__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_lowerCamelCase , repo_id="valid_org/test-config-org" , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
__magic_name__ = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
def __A ( self : Optional[int] ) -> Union[str, Any]:
CustomConfig.register_for_auto_class()
__magic_name__ = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
__magic_name__ = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ) -> Optional[Any]:
__magic_name__ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__magic_name__ = c.n_embd + 1 # int
__magic_name__ = c.resid_pdrop + 1.0 # float
__magic_name__ = not c.scale_attn_weights # bool
__magic_name__ = c.summary_type + "foo" # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(_lowerCamelCase , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(_lowerCamelCase , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(_lowerCamelCase , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(_lowerCamelCase , c.summary_type , "mismatch for key: summary_type" )
def __A ( self : List[Any] ) -> Union[str, Any]:
__magic_name__ = PretrainedConfig()
__magic_name__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
_lowerCamelCase , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
__magic_name__ = [key for key, value in config_common_kwargs.items() if value == getattr(_lowerCamelCase , _lowerCamelCase )]
if len(_lowerCamelCase ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
f' {", ".join(_lowerCamelCase )}.' )
def __A ( self : List[Any] ) -> List[Any]:
with self.assertRaises(_lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(_lowerCamelCase )
def __A ( self : Tuple ) -> int:
# A mock response for an HTTP head request to emulate server down
__magic_name__ = mock.Mock()
__magic_name__ = 5_00
__magic_name__ = {}
__magic_name__ = HTTPError
__magic_name__ = {}
# Download this model to make sure it's in the cache.
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_lowerCamelCase ) as mock_head:
__magic_name__ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This checks that the fake head request was called
mock_head.assert_called()
def __A ( self : Union[str, Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
__magic_name__ = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 42.0.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
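
    # The repo used below ships two version-suffixed configuration files; as above,
    # `from_pretrained` selects the one matching the installed `transformers` version.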
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 664 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
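
    # The three `test_call_*` methods below check the same output-shape contract,
    # (batch_size, num_frames, num_channels, crop_height, crop_width), for PIL,
    # NumPy, and PyTorch video inputs.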
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 50 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
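
    # DPT resizes every input to the fixed `size`, so the expected output shape in the
    # following tests is deterministic even though the input resolutions are random.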
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 664 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass
    def create_state(self):
        return KarrasVeSchedulerState.create()
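
    # Karras et al. (2022) noise schedule: sigma(t_i) decays geometrically from
    # sigma_max down to sigma_min over `num_inference_steps` steps.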
    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
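
    # Stochastic "churn" step: while sigma lies in [s_min, s_max], the noise level is
    # temporarily raised to sigma_hat = sigma * (1 + gamma) before the ODE step.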
    def add_noise_to_input(
        self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
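
    # First-order (Euler) step from sigma_hat down to sigma_prev.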
    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
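
    # Second-order (Heun) correction: re-evaluate the derivative at sigma_prev and
    # average it with the Euler derivative before redoing the step.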
    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 120 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
"""simple docstring"""
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
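
    # Each `updated_*` term below is the gradient of the squared error,
    # 2 * (output - predicted_output), propagated backwards through the sigmoid
    # layers (hence the repeated `sigmoid_derivative` factors).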
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f'Iteration {iteration} Loss: {loss}')
    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 664 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
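
    # DETA rescales the shortest edge to `size["shortest_edge"]` while preserving
    # aspect ratio, so expected output sizes must be computed per input image.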
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) | 337 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
"""simple docstring"""
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))
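
    # `forward` scores each query token against the support tokens marked by the
    # entity start/end special tokens, yielding per-token start and end probabilities.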
    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 664 | 0 |
values = {
    0: '0',
    1: '1',
    2: '2',
    3: '3',
    4: '4',
    5: '5',
    6: '6',
    7: '7',
    8: '8',
    9: '9',
    10: 'a',
    11: 'b',
    12: 'c',
    13: 'd',
    14: 'e',
    15: 'f',
}
def decimal_to_hexadecimal(decimal: float) -> str:
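    """Convert a whole number to its hexadecimal representation.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(0)
    '0x0'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """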
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    if decimal == 0:
        # special-case zero, which the division loop below would otherwise skip
        hexadecimal = '0'
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 664 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : List[str] = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=30522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
@property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`." ) | 623 |
'''simple docstring'''
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
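    # `split_mlp_wi` (set just above) distinguishes T5 v1.1 / LongT5 checkpoints, whose
    # MLP is gated with two input projections (wi_0 and wi_1), from classic T5's single wi.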
if config.model_type == "t5":
__magic_name__ = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__magic_name__ = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
__magic_name__ = F'layers_{str(lowerCamelCase_ )}'
# Self-Attention
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
__magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
__magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__magic_name__ = flax_model.params["encoder"]["block"][str(lowerCamelCase_ )]["layer"]
__magic_name__ = tax_attention_key
__magic_name__ = tax_attention_out
__magic_name__ = tax_attention_query
__magic_name__ = tax_attention_value
__magic_name__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_global_layer_norm
if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_0
            __magic_name__ = tax_mlp_wi_1
else:
__magic_name__ = tax_mlp_wi
__magic_name__ = tax_mlp_wo
__magic_name__ = tax_mlp_layer_norm
__magic_name__ = flax_model_encoder_layer_block
# Only for layer 0:
__magic_name__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_encoder_global_rel_embedding
# Assigning
__magic_name__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
__magic_name__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
__magic_name__ = F'layers_{str(lowerCamelCase_ )}'
# Self-Attention
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
__magic_name__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
__magic_name__ = tax_enc_dec_attention_module["key"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["out"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["query"]["kernel"]
__magic_name__ = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
__magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
__magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
__magic_name__ = flax_model.params["decoder"]["block"][str(lowerCamelCase_ )]["layer"]
__magic_name__ = tax_attention_key
__magic_name__ = tax_attention_out
__magic_name__ = tax_attention_query
__magic_name__ = tax_attention_value
__magic_name__ = tax_pre_attention_layer_norm
__magic_name__ = tax_enc_dec_attention_key
__magic_name__ = tax_enc_dec_attention_out
__magic_name__ = tax_enc_dec_attention_query
__magic_name__ = tax_enc_dec_attention_value
__magic_name__ = tax_cross_layer_norm
if split_mlp_wi:
            __magic_name__ = tax_mlp_wi_0
            __magic_name__ = tax_mlp_wi_1
else:
__magic_name__ = tax_mlp_wi
__magic_name__ = tax_mlp_wo
        __magic_name__ = tax_mlp_layer_norm
__magic_name__ = flax_model_decoder_layer_block
# Decoder Normalization
__magic_name__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
__magic_name__ = txa_decoder_norm
# Only for layer 0:
__magic_name__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
__magic_name__ = tax_decoder_rel_embedding
# Token Embeddings
__magic_name__ = tax_model["target"]["token_embedder"]["embedding"]
__magic_name__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__magic_name__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
__magic_name__ : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
__magic_name__ : Optional[int] =parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 664 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
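

# Both plots below use the same technique: feed a unit impulse through the filter,
# zero-pad the impulse response to `samplerate` samples, and take its FFT; the
# magnitude (in dB) gives the frequency response, the angle gives the phase response.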
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
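

# Minimal usage sketch (hypothetical: assumes an IIR-style filter class from this
# package implementing the `FilterType.process` protocol):
#
#   filt = IIRFilter(4)
#   show_frequency_response(filt, 48000)
#   show_phase_response(filt, 48000)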
| 297 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
"""simple docstring"""
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()
    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 664 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=model_id, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        pass
| 417 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
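

# Deduplication works in two stages: each file is reduced to a MinHash signature over
# its token set, then MinHashLSH retrieves candidate near-duplicates whose estimated
# Jaccard similarity exceeds the threshold, avoiding a quadratic all-pairs comparison.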
def get_min_hash(tokens):
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
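# `_shared_dataset` is assigned once in `find_extremes` so that worker processes
# spawned by `multiprocessing` can read the dataset through module state instead of
# pickling it for every task.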
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate `dataset`: keep only the extremes of each duplicate cluster and return the
    filtered dataset together with the annotated duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element

    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
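# A minimal usage sketch (not part of the original script): assuming `ds` is a
# datasets.Dataset with "content", "repo_name" and "path" columns, the pipeline
# above can be driven like this (the dataset name is a hypothetical placeholder):
#
# from datasets import load_dataset
# ds = load_dataset("my-org/my-code-dataset", split="train")
# ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
# print(f"kept {len(ds_dedup)} of {len(ds)} files")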
| 664 | 0 |
"""Feature extractor class for DPT (kept as a deprecation shim around DPTImageProcessor)."""
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 384 |
"""Convert Bort checkpoint."""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (gluonnlp/mxnet) into a PyTorch BertForMaskedLM checkpoint."""
    # Original Bort configuration (4 layers, 8 heads, 768 hidden size, 1024 units)
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
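# Example invocation (both paths are hypothetical; the script filename follows the
# usual transformers conversion-script convention and may differ in your checkout):
#
# python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#     --bort_checkpoint_path ./bort.params \
#     --pytorch_dump_folder_path ./bort-pytorch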
| 664 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = ['''image_processor''', '''tokenizer''']
_a = '''AutoImageProcessor'''
_a = '''AutoTokenizer'''
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Dict ):
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase = self.image_processor
def __call__( self : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Tuple ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
lowerCAmelCase = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
if images is not None:
lowerCAmelCase = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
if text is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase )
def __lowercase ( self : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Any ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def __lowercase ( self : Optional[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@property
def __lowercase ( self : List[str] ):
return ["input_ids", "attention_mask", "pixel_values"]
| 169 |
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a "0b"-prefixed binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 664 | 0 |
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Count the square laminae that can be formed using up to `limit` square tiles
    (Project Euler problem 173)."""
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
| 461 |
"""Logging utilities."""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the
    default level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. Not supposed to be accessed directly unless you are
    writing a custom transformers module.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current verbosity level of the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """Enable explicit formatting for every handler on the library's root logger."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting for the library's loggers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but skipped if the env var
    TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits a warning with the same message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
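# A minimal usage sketch (not part of the original module) showing how the public
# helpers above are typically combined by downstream code:
#
# logger = get_logger(__name__)
# set_verbosity_info()     # INFO records now reach the default stderr handler
# logger.info("hello")     # printed
# disable_progress_bars()  # tqdm(...) defined above now yields EmptyTqdm objects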
| 664 | 0 |