Dataset schema (column: type, observed range):
- code: string, lengths 81 to 54k
- code_codestyle: int64, values 0 to 721
- style_context: string, lengths 91 to 41.9k
- style_context_codestyle: int64, values 0 to 699
- label: int64, values 0 to 1
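A minimal sketch of loading and inspecting rows with this schema via the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the real path.

from datasets import load_dataset

# Placeholder identifier: substitute the actual dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

print(ds.features)  # code (string), code_codestyle (int64), style_context (string),
                    # style_context_codestyle (int64), label (int64)

row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["label"])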
import warnings

from .generation import TFGenerationMixin


class TFGenerationMixin(TFGenerationMixin):
    # Redefining the imported class under the same name keeps old-style imports
    # working while emitting a deprecation warning at import time.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
code_codestyle: 717
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Shortest distance and path between two cells of a binary grid,
    where cells equal to 1 are walkable and each step costs 1."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
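A minimal usage sketch for the `dijkstra` helper above, assuming the signature as restored (walkable cells are 1):

import numpy as np

grid = np.array(
    [
        [1, 1, 1],
        [0, 0, 1],
        [1, 1, 1],
    ]
)
dist, path = dijkstra(grid, source=(0, 0), destination=(2, 0), allow_diagonal=False)
print(dist)  # 6.0 (right, right, down, down, left, left)
print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]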
style_context_codestyle: 43
label: 0
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the matching torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
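A quick usage sketch for `get_activation`; the swish/silu aliases resolve to the same module:

import torch

act = get_activation("swish")
print(type(act).__name__)        # SiLU
print(act(torch.tensor([1.0])))  # tensor([0.7311])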
code_codestyle: 718
'''simple docstring''' import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = XLMTokenizer lowerCAmelCase__ = False def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]: UpperCAmelCase_ = '''lower newer''' UpperCAmelCase_ = '''lower newer''' return input_text, output_text def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase_ = '''lower''' UpperCAmelCase_ = ['''low''', '''er</w>'''] UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = tokens + ['''<unk>'''] UpperCAmelCase_ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) @slow def lowerCAmelCase__ ( self : Any ) ->str: UpperCAmelCase_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' ) UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
style_context_codestyle: 43
label: 0
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class lowerCamelCase : '''simple docstring''' def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=99 , UpperCAmelCase__ : Optional[int]=32 , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=50 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=None , ) ->List[str]: UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = initializer_range UpperCAmelCase_ = use_labels UpperCAmelCase_ = scope def lowerCAmelCase__ ( self : Dict ) ->str: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = self.get_config() return config, input_ids, input_mask, token_labels def lowerCAmelCase__ ( self : List[Any] ) ->Any: return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=A__ , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self : int ) ->Tuple: ( UpperCAmelCase_ ) = self.prepare_config_and_inputs() UpperCAmelCase_ = True UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict , ) ->List[Any]: UpperCAmelCase_ = BertGenerationEncoder(config=A__ ) model.to(A__ ) model.eval() UpperCAmelCase_ = model(A__ , attention_mask=A__ ) UpperCAmelCase_ = model(A__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , **UpperCAmelCase__ : List[Any] , ) ->Optional[int]: UpperCAmelCase_ = True UpperCAmelCase_ = BertGenerationEncoder(config=A__ ) model.to(A__ ) model.eval() UpperCAmelCase_ = model( A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , ) UpperCAmelCase_ = model( A__ , attention_mask=A__ , encoder_hidden_states=A__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , **UpperCAmelCase__ : int , ) ->List[Any]: UpperCAmelCase_ = True UpperCAmelCase_ = True UpperCAmelCase_ = BertGenerationDecoder(config=A__ ).to(A__ ).eval() # first forward pass UpperCAmelCase_ = model( A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , use_cache=A__ , ) UpperCAmelCase_ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase_ = model( A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , output_hidden_states=A__ , )["""hidden_states"""][0] UpperCAmelCase_ = model( A__ , attention_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , past_key_values=A__ , output_hidden_states=A__ , )["""hidden_states"""][0] # select random slice UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-3 ) ) def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Tuple , ) ->List[str]: UpperCAmelCase_ = BertGenerationDecoder(A__ ) model.to(A__ ) model.eval() UpperCAmelCase_ = model(A__ , attention_mask=A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self : List[str] ) ->str: UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowerCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else () lowerCAmelCase__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if 
is_torch_available() else {} ) def lowerCAmelCase__ ( self : Any ) ->Optional[Any]: UpperCAmelCase_ = BertGenerationEncoderTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=A__ , hidden_size=37 ) def lowerCAmelCase__ ( self : Tuple ) ->Any: self.config_tester.run_common_tests() def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() UpperCAmelCase_ = """bert""" self.model_tester.create_and_check_model(A__ , A__ , A__ , A__ ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*A__ ) def lowerCAmelCase__ ( self : Dict ) ->List[Any]: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*A__ ) def lowerCAmelCase__ ( self : List[Any] ) ->Any: # This regression test was failing with PyTorch < 1.3 ( UpperCAmelCase_ ) = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase_ = None self.model_tester.create_and_check_model_as_decoder( A__ , A__ , A__ , A__ , A__ , A__ , ) def lowerCAmelCase__ ( self : Tuple ) ->List[str]: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*A__ ) @slow def lowerCAmelCase__ ( self : str ) ->Tuple: UpperCAmelCase_ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(A__ ) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]: UpperCAmelCase_ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) UpperCAmelCase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): UpperCAmelCase_ = model(A__ )[0] UpperCAmelCase_ = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , A__ ) UpperCAmelCase_ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) ) @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase__ ( self : Any ) ->Dict: UpperCAmelCase_ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) UpperCAmelCase_ = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): UpperCAmelCase_ = model(A__ )[0] UpperCAmelCase_ = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape , A__ ) UpperCAmelCase_ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1e-4 ) )
code_codestyle: 719
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    """Disable gradient updates for every parameter of the module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick CUDA when available, otherwise MPS, otherwise CPU."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image with the axis ticks hidden."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Current wall-clock time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
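A short usage sketch, assuming the helper names restored above (`get_device`, `freeze_module`):

import torch
from torch import nn

device = get_device()
model = nn.Linear(8, 2).to(device)
freeze_module(model)
print(all(not p.requires_grad for p in model.parameters()))  # True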
style_context_codestyle: 43
label: 0
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Any ) ->int: UpperCAmelCase_ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase_ = dict(zip(_a , range(len(_a ) ) ) ) UpperCAmelCase_ = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase_ = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_6000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , _a ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_a ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_a ) + '''\n''' ) # load decoder from hub UpperCAmelCase_ = '''hf-internal-testing/ngram-beam-search-decoder''' def lowerCAmelCase__ ( self : Any , **UpperCAmelCase__ : Optional[Any] ) ->Dict: UpperCAmelCase_ = self.add_kwargs_tokens_map.copy() kwargs.update(_a ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_a ) def lowerCAmelCase__ ( self : int , **UpperCAmelCase__ : List[Any] ) ->Optional[Any]: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_a ) def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : Union[str, Any] ) ->List[str]: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_a ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]: shutil.rmtree(self.tmpdirname ) def lowerCAmelCase__ ( self : int ) ->Optional[Any]: UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _a ) # decoder self.assertEqual(processor.decoder._alphabet.labels , 
decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _a ) def lowerCAmelCase__ ( self : Optional[int] ) ->str: UpperCAmelCase_ = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase_ = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def lowerCAmelCase__ ( self : Optional[int] ) ->int: UpperCAmelCase_ = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_a , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def lowerCAmelCase__ ( self : Optional[int] ) ->int: UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) UpperCAmelCase_ = floats_list((3, 1000) ) UpperCAmelCase_ = feature_extractor(_a , return_tensors='''np''' ) UpperCAmelCase_ = processor(_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCAmelCase__ ( self : List[str] ) ->List[Any]: UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) UpperCAmelCase_ = '''This is a test string''' UpperCAmelCase_ = processor(text=_a ) UpperCAmelCase_ = tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : str=(2, 10, 16) , UpperCAmelCase__ : Any=77 ) ->List[Any]: np.random.seed(_a ) return np.random.rand(*_a ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]: UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) UpperCAmelCase_ = self._get_dummy_logits(shape=(10, 16) , seed=13 ) UpperCAmelCase_ = processor.decode(_a ) UpperCAmelCase_ = decoder.decode_beams(_a )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Optional[Any] ) ->List[str]: UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) 
UpperCAmelCase_ = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase_ = processor.batch_decode(_a ) else: with get_context(_a ).Pool() as pool: UpperCAmelCase_ = processor.batch_decode(_a , _a ) UpperCAmelCase_ = list(_a ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase_ = decoder.decode_beams_batch(_a , _a ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_a , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_a , decoded_processor.logit_score ) self.assertListEqual(_a , decoded_processor.lm_score ) def lowerCAmelCase__ ( self : Tuple ) ->Tuple: UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) UpperCAmelCase_ = self._get_dummy_logits() UpperCAmelCase_ = 15 UpperCAmelCase_ = -20.0 UpperCAmelCase_ = -4.0 UpperCAmelCase_ = processor.batch_decode( _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , ) UpperCAmelCase_ = decoded_processor_out.text UpperCAmelCase_ = list(_a ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase_ = decoder.decode_beams_batch( _a , _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , ) UpperCAmelCase_ = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase_ = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase_ = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_a , _a ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _a ) self.assertTrue(np.array_equal(_a , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _a , atol=1e-3 ) ) self.assertTrue(np.array_equal(_a , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , _a , atol=1e-3 ) ) def lowerCAmelCase__ ( self : Tuple ) ->List[Any]: UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) UpperCAmelCase_ = self._get_dummy_logits() UpperCAmelCase_ = 2.0 UpperCAmelCase_ = 5.0 UpperCAmelCase_ = -20.0 UpperCAmelCase_ = True UpperCAmelCase_ = processor.batch_decode( _a , alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , ) UpperCAmelCase_ = decoded_processor_out.text UpperCAmelCase_ = list(_a ) decoder.reset_params( alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase_ = decoder.decode_beams_batch( _a , _a , ) UpperCAmelCase_ = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_a , _a ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _a ) UpperCAmelCase_ = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _a ) def lowerCAmelCase__ ( self : str ) ->Tuple: 
UpperCAmelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase_ = os.listdir(_a ) UpperCAmelCase_ = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_a , _a ) def lowerCAmelCase__ ( self : Tuple ) ->int: UpperCAmelCase_ = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ = WavaVecaProcessorWithLM.from_pretrained(_a ) UpperCAmelCase_ = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase_ = os.listdir(_a ) UpperCAmelCase_ = os.listdir(_a ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_a , _a ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict: UpperCAmelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ = floats_list((3, 1000) ) UpperCAmelCase_ = processor_wavaveca(_a , return_tensors='''np''' ) UpperCAmelCase_ = processor_auto(_a , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase_ = self._get_dummy_logits() UpperCAmelCase_ = processor_wavaveca.batch_decode(_a ) UpperCAmelCase_ = processor_auto.batch_decode(_a ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def lowerCAmelCase__ ( self : List[str] ) ->int: UpperCAmelCase_ = self.get_feature_extractor() UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = self.get_decoder() UpperCAmelCase_ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def lowerCAmelCase__ ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ) ->List[str]: UpperCAmelCase_ = [d[key] for d in offsets] return retrieved_list def lowerCAmelCase__ ( self : Any ) ->List[str]: UpperCAmelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ = self._get_dummy_logits()[0] UpperCAmelCase_ = processor.decode(_a , output_word_offsets=_a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_a , _a ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , 
'''end_offset''' ) , [1, 3, 5] ) def lowerCAmelCase__ ( self : Any ) ->Optional[Any]: UpperCAmelCase_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ = self._get_dummy_logits() UpperCAmelCase_ = processor.batch_decode(_a , output_word_offsets=_a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_a , _a ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_a , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple: import torch UpperCAmelCase_ = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_a ) UpperCAmelCase_ = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) ) UpperCAmelCase_ = iter(_a ) UpperCAmelCase_ = next(_a ) UpperCAmelCase_ = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase_ = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase_ = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase_ = model(_a ).logits.cpu().numpy() UpperCAmelCase_ = processor.decode(logits[0] , output_word_offsets=_a ) UpperCAmelCase_ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase_ = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase_ = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , _a ) self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , output.text ) # output times UpperCAmelCase_ = torch.tensor(self.get_from_offsets(_a , '''start_time''' ) ) UpperCAmelCase_ = torch.tensor(self.get_from_offsets(_a , '''end_time''' ) ) # fmt: off UpperCAmelCase_ = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) UpperCAmelCase_ = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) ) self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
code_codestyle: 720
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Any ) ->Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self : str ) ->List[str]: UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('''sample_euler''' ) UpperCAmelCase_ = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase__ ( self : List[str] ) ->int: UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('''sample_euler''' ) UpperCAmelCase_ = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]: UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) UpperCAmelCase_ = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCAmelCase__ , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array( [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
style_context_codestyle: 43
label: 0
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
code_codestyle: 721
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = MvpTokenizer lowerCAmelCase__ = MvpTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = filter_roberta_detectors def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: super().setUp() UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase_ = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCAmelCase__ ( self : Tuple , **UpperCAmelCase__ : List[str] ) ->Dict: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : int ) ->Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]: return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' ) @cached_property def lowerCAmelCase__ ( self : Tuple ) ->Tuple: return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' ) @require_torch def lowerCAmelCase__ ( self : Any ) ->Dict: UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCAmelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # Test that special tokens are reset @require_torch def lowerCAmelCase__ ( self : str ) ->int: UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' ) # check if 
input_ids are returned and no labels self.assertIn('''input_ids''' , UpperCAmelCase__ ) self.assertIn('''attention_mask''' , UpperCAmelCase__ ) self.assertNotIn('''labels''' , UpperCAmelCase__ ) self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ ) @require_torch def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]: UpperCAmelCase_ = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def lowerCAmelCase__ ( self : List[str] ) ->int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def lowerCAmelCase__ ( self : Dict ) ->Optional[int]: UpperCAmelCase_ = ['''A long paragraph for summarization.'''] UpperCAmelCase_ = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , text_target=UpperCAmelCase__ , return_tensors='''pt''' ) UpperCAmelCase_ = inputs['''input_ids'''] UpperCAmelCase_ = inputs['''labels'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def lowerCAmelCase__ ( self : str ) ->Optional[Any]: pass def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.''' UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', 
'''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
style_context_codestyle: 43
label: 0
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Maximum sum over all (not necessarily contiguous) subsequences of ``nums``."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either start over at num, extend the best-so-far with num, or skip num.
        ans = max(num, ans + num, ans)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
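A worked example for `max_subsequence_sum`; because the subsequence need not be contiguous, every positive term can be kept:

print(max_subsequence_sum([1, 2, 3, 4, -2]))  # 10 = 1 + 2 + 3 + 4
print(max_subsequence_sum([-10, -2, -5]))     # -2, the best single element when all are negative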
code_codestyle: 700
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the strings in ``separated``, placing ``separator`` between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # Remove the trailing separator left by the loop.
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
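A quick usage sketch for the reconstructed `join`:

print(join("-", ["apple", "banana", "cherry"]))  # apple-banana-cherry
print(join(" ", ["You", "are", "amazing"]))      # You are amazing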
style_context_codestyle: 43
label: 0
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=13 , UpperCAmelCase__ : List[str]=30 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[Any]=32 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : str=37 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Tuple=10 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=2 , ) ->Union[str, Any]: UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = scope UpperCAmelCase_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 2 def lowerCAmelCase__ ( self : List[str] ) ->Dict: UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self : Dict ) ->List[Any]: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , 
UpperCAmelCase__ : Any ) ->List[str]: UpperCAmelCase_ = DeiTModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() UpperCAmelCase_ = model(lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int ) ->str: UpperCAmelCase_ = DeiTForMaskedImageModeling(config=lowercase__ ) model.to(lowercase__ ) model.eval() UpperCAmelCase_ = model(lowercase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = DeiTForMaskedImageModeling(lowercase__ ) model.to(lowercase__ ) model.eval() UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(lowercase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ) ->int: UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = DeiTForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() UpperCAmelCase_ = model(lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = DeiTForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]: UpperCAmelCase_ = self.prepare_config_and_inputs() ( UpperCAmelCase_ ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: UpperCAmelCase_ = DeiTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 ) def lowerCAmelCase__ ( self : int ) ->int: self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def lowerCAmelCase__ ( self : int ) ->str: pass def lowerCAmelCase__ ( self : List[Any] ) ->Any: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(lowercase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: UpperCAmelCase_ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(lowercase__ ) UpperCAmelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase__ ) def lowerCAmelCase__ ( self : Dict ) ->int: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__ ) def lowerCAmelCase__ ( self : int ) ->int: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase__ ) def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=False ) ->str: UpperCAmelCase_ = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase__ ( self : str ) ->Optional[int]: if not self.model_tester.is_training: return UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowercase__ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue UpperCAmelCase_ = model_class(lowercase__ ) model.to(lowercase__ ) model.train() UpperCAmelCase_ = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ ) UpperCAmelCase_ = model(**lowercase__ ).loss loss.backward() def lowerCAmelCase__ ( self : Tuple ) ->Tuple: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase_ = False UpperCAmelCase_ = True for model_class in self.all_model_classes: if model_class in get_values(lowercase__ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue UpperCAmelCase_ = model_class(lowercase__ ) model.gradient_checkpointing_enable() model.to(lowercase__ ) model.train() UpperCAmelCase_ = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ ) UpperCAmelCase_ = model(**lowercase__ ).loss loss.backward() def lowerCAmelCase__ ( self : Any ) ->List[Any]: UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowercase__ ), *get_values(lowercase__ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ): UpperCAmelCase_ = problem_type["title"] UpperCAmelCase_ = 
problem_type["num_labels"] UpperCAmelCase_ = model_class(lowercase__ ) model.to(lowercase__ ) model.train() UpperCAmelCase_ = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ ) if problem_type["num_labels"] > 1: UpperCAmelCase_ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) UpperCAmelCase_ = inputs["labels"].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowercase__ ) as warning_list: UpperCAmelCase_ = model(**lowercase__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = DeiTModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase__ ( self : List[Any] ) ->Dict: return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]: UpperCAmelCase_ = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( lowercase__ ) UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ = model(**lowercase__ ) # verify the logits UpperCAmelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase__ ) UpperCAmelCase_ = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(lowercase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def lowerCAmelCase__ ( self : List[str] ) ->Dict: UpperCAmelCase_ = DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=lowercase__ , return_tensors='''pt''' ) UpperCAmelCase_ = inputs.pixel_values.to(lowercase__ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): UpperCAmelCase_ = model(lowercase__ )
701
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase__ : Optional[int] = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, Iterable[int]] , _UpperCamelCase : bool , _UpperCamelCase : int ): '''simple docstring''' def constraint_to_multiple_of(_UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : str=None ): UpperCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: UpperCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: UpperCAmelCase_ = math.ceil(val / multiple ) * multiple return x UpperCAmelCase_ = (output_size, output_size) if isinstance(_UpperCamelCase , _UpperCamelCase ) else output_size UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ = output_size # determine new height and width UpperCAmelCase_ = output_height / input_height UpperCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width UpperCAmelCase_ = scale_width else: # fit height UpperCAmelCase_ = scale_height UpperCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=_UpperCamelCase ) UpperCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=_UpperCamelCase ) return (new_height, new_width) class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = ['''pixel_values'''] def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : str , ) ->None: super().__init__(**UpperCAmelCase__ ) UpperCAmelCase_ = size if size is not None else {'''height''': 384, '''width''': 384} UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = keep_aspect_ratio UpperCAmelCase_ = ensure_multiple_of UpperCAmelCase_ = resample UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, 
ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray: UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) UpperCAmelCase_ = get_resize_output_image_size( UpperCAmelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , ) return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Any: return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray: return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->PIL.Image.Image: UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) UpperCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio UpperCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ = [to_numpy_array(UpperCAmelCase__ ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] UpperCAmelCase_ = {'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) ->Optional[Any]: UpperCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(UpperCAmelCase__ ): UpperCAmelCase_ = target_sizes.numpy() UpperCAmelCase_ = [] for idx in range(len(UpperCAmelCase__ ) ): UpperCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__ ) UpperCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase__ ) else: UpperCAmelCase_ = logits.argmax(dim=1 ) UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
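The resize path above snaps each scaled side to a multiple of `ensure_multiple_of`; a self-contained sketch of that rounding rule with illustrative numbers (the function name here is ours):

import math

def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
    # Round to the nearest multiple, then pull the result back inside the bounds.
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

print(constrain_to_multiple_of(384, 32))  # 384 (already a multiple of 32)
print(constrain_to_multiple_of(470, 32))  # 480 (nearest multiple of 32)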
43
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase__ : List[str] = False @skip_mps class lowerCamelCase ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = StableDiffusionAttendAndExcitePipeline lowerCAmelCase__ = False lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def lowerCAmelCase__ ( cls : List[Any] ) ->Tuple: super().setUpClass() torch.use_deterministic_algorithms(UpperCAmelCase__ ) @classmethod def lowerCAmelCase__ ( cls : List[str] ) ->Any: super().tearDownClass() torch.use_deterministic_algorithms(UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Tuple ) ->str: torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , ) UpperCAmelCase_ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , ) torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) UpperCAmelCase_ = CLIPTextModel(UpperCAmelCase__ ) UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple=0 ) ->Union[str, Any]: if str(UpperCAmelCase__ ).startswith('''mps''' ): UpperCAmelCase_ = torch.manual_seed(UpperCAmelCase__ ) else: UpperCAmelCase_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) UpperCAmelCase_ = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def lowerCAmelCase__ ( self : Union[str, Any] ) ->str: UpperCAmelCase_ = 'cpu' UpperCAmelCase_ = self.get_dummy_components() 
UpperCAmelCase_ = self.pipeline_class(**UpperCAmelCase__ ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) UpperCAmelCase_ = self.get_dummy_inputs(UpperCAmelCase__ ) UpperCAmelCase_ = pipe(**UpperCAmelCase__ ).images UpperCAmelCase_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) UpperCAmelCase_ = np.array( [0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] ) UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCAmelCase__ , 1e-3 ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def lowerCAmelCase__ ( self : Optional[int] ) ->str: self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCAmelCase__ ( self : Dict ) ->List[str]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def lowerCAmelCase__ ( self : str ) ->List[str]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def lowerCAmelCase__ ( self : Optional[int] ) ->str: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: super().test_save_load_local(expected_max_difference=5e-4 ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowerCAmelCase__ ( cls : Any ) ->Dict: super().setUpClass() torch.use_deterministic_algorithms(UpperCAmelCase__ ) @classmethod def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int: super().tearDownClass() torch.use_deterministic_algorithms(UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[Any] ) ->Any: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self : int ) ->str: UpperCAmelCase_ = torch.manual_seed(51 ) UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) UpperCAmelCase_ = 'a painting of an elephant with glasses' UpperCAmelCase_ = [5, 7] UpperCAmelCase_ = pipe( prompt=UpperCAmelCase__ , token_indices=UpperCAmelCase__ , guidance_scale=7.5 , generator=UpperCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-1
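A usage sketch mirroring the slow test above, assuming a CUDA device and access to the CompVis/stable-diffusion-v1-4 checkpoint (the obfuscated torch.floataa is read as torch.float16 here):

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
).to("cuda")

generator = torch.manual_seed(51)
image = pipe(
    prompt="a painting of an elephant with glasses",
    token_indices=[5, 7],  # prompt tokens whose attention is maximized
    guidance_scale=7.5,
    generator=generator,
    num_inference_steps=5,
    max_iter_to_alter=5,
).images[0]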
702
'''simple docstring'''
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase__ : int = logging.get_logger(__name__)

lowercase__ : List[str] = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''time_series_transformer'''
    lowerCAmelCase__ = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]:
        # time series specific configuration
        UpperCAmelCase_ = prediction_length
        UpperCAmelCase_ = context_length or prediction_length
        UpperCAmelCase_ = distribution_output
        UpperCAmelCase_ = loss
        UpperCAmelCase_ = input_size
        UpperCAmelCase_ = num_time_features
        UpperCAmelCase_ = lags_sequence
        UpperCAmelCase_ = scaling
        UpperCAmelCase_ = num_dynamic_real_features
        UpperCAmelCase_ = num_static_real_features
        UpperCAmelCase_ = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(UpperCAmelCase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            UpperCAmelCase_ = cardinality
        else:
            UpperCAmelCase_ = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(UpperCAmelCase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            UpperCAmelCase_ = embedding_dimension
        else:
            UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        UpperCAmelCase_ = num_parallel_samples
        # Transformer architecture configuration
        UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features
        UpperCAmelCase_ = d_model
        UpperCAmelCase_ = encoder_attention_heads
        UpperCAmelCase_ = decoder_attention_heads
        UpperCAmelCase_ = encoder_ffn_dim
        UpperCAmelCase_ = decoder_ffn_dim
        UpperCAmelCase_ = encoder_layers
        UpperCAmelCase_ = decoder_layers
        UpperCAmelCase_ = dropout
        UpperCAmelCase_ = attention_dropout
        UpperCAmelCase_ = activation_dropout
        UpperCAmelCase_ = encoder_layerdrop
        UpperCAmelCase_ = decoder_layerdrop
        UpperCAmelCase_ = activation_function
        UpperCAmelCase_ = init_std
        UpperCAmelCase_ = use_cache
        super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )

    @property
    def lowerCAmelCase__ ( self : List[str] ) ->int:
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
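To make the feature-count arithmetic above concrete, a small hypothetical configuration (this assumes the class is exposed publicly as transformers.TimeSeriesTransformerConfig and that the encoder input width is stored as feature_size):

from transformers import TimeSeriesTransformerConfig

cfg = TimeSeriesTransformerConfig(
    prediction_length=24,
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[10],
    embedding_dimension=[3],
)
# _number_of_features = 3 (embedding) + 0 + 2 (time) + 0 + 1 * 2 (loc/scale) = 7
# encoder input width = input_size * len(lags_sequence) + 7 = 1 * 7 + 7 = 14
print(cfg.feature_size)  # expected: 14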
43
0
'''simple docstring'''
from collections.abc import Sequence


def __lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ):
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(__lowercase ) )


def __lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ):
    '''simple docstring'''
    UpperCAmelCase_ = 0.0
    for coeff in reversed(__lowercase ):
        UpperCAmelCase_ = result * x + coeff
    return result


if __name__ == "__main__":
    lowercase__ : Dict = (0.0, 0.0, 5.0, 9.3, 7.0)
    lowercase__ : int = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
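A worked check of Horner's rule as exercised by the __main__ block (the fold is rebound here because the two obfuscated definitions above share one name):

def horner(coeffs, x):
    # Fold from the highest-degree coefficient down: ((c_n*x + c_{n-1})*x + ...)
    result = 0.0
    for c in reversed(coeffs):
        result = result * x + c
    return result

# (0.0, 0.0, 5.0, 9.3, 7.0) encodes 5x^2 + 9.3x^3 + 7x^4, so at x = 10:
# 5*100 + 9.3*1000 + 7*10000 = 500 + 9300 + 70000 = 79800
assert horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) == 79800.0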
703
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig


lowercase__ : Dict = logging.get_logger(__name__)

lowercase__ : List[Any] = "T5Config"


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig
43
0
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets lowercase__ : Any = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" lowercase__ : str = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" lowercase__ : Tuple = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(self._get_feature_types() ) ,
            reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
            ] ,
        )

    def lowerCAmelCase__ ( self : Dict ) ->List[str]:
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value('''float''' ) ),
                "references": datasets.Sequence(datasets.Value('''float''' ) ),
            }
        else:
            return {
                "predictions": datasets.Value('''float''' ),
                "references": datasets.Value('''float''' ),
            }

    def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[str]="uniform_average" , UpperCAmelCase__ : int=True ) ->Tuple:
        UpperCAmelCase_ = mean_squared_error(
            _A , _A , sample_weight=_A , multioutput=_A , squared=_A )
        return {"mse": mse}
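The documented example can be checked directly against sklearn, which this metric wraps:

from sklearn.metrics import mean_squared_error

preds, refs = [2.5, 0.0, 2, 8], [3, -0.5, 2, 7]
print(mean_squared_error(refs, preds))                 # 0.375
print(mean_squared_error(refs, preds, squared=False))  # 0.6123724356957945 (RMSE)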
704
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowercase__ : str = datasets.logging.get_logger(__name__) lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n    `scores`: List of scores.\n\nExamples:\n\n    >>> comet_metric = datasets.load_metric('comet')\n    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n    >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n    >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n    >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [0.19, 0.92]\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : List[Any] ) ->Any:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage='''https://unbabel.github.io/COMET/html/index.html''' ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                }
            ) ,
            codebase_urls=['''https://github.com/Unbabel/COMET'''] ,
            reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] ,
        )

    def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Union[str, Any] ) ->Any:
        if self.config_name == "default":
            UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=False ) ->Optional[Any]:
        if gpus is None:
            UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
        UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        UpperCAmelCase_ = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for t in zip(*data.values() )]
        UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(UpperCAmelCase__ , gpus=UpperCAmelCase__ , progress_bar=UpperCAmelCase__ )
        return {"mean_score": mean_score, "scores": scores}
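A usage sketch of the wrapped package, assuming unbabel-comet exposes download_model and load_from_checkpoint exactly as called above; the return shape of predict differs between comet releases, so it is left opaque here:

import comet  # From: unbabel-comet

model = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
data = [
    {
        "src": "Dem Feuer konnte Einhalt geboten werden",
        "mt": "The fire could be stopped",
        "ref": "They were able to control the fire.",
    }
]
output = model.predict(data, gpus=0, progress_bar=False)
print(output)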
43
0
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase__ : Tuple = get_tests_dir("fixtures/dummy_feature_extractor_config.json") lowercase__ : Union[str, Any] = get_tests_dir("fixtures/vocab.json") lowercase__ : Union[str, Any] = get_tests_dir("fixtures") class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]: UpperCAmelCase_ = 0 def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]: UpperCAmelCase_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(snake_case__ , snake_case__ ) def lowerCAmelCase__ ( self : List[Any] ) ->int: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaConfig() UpperCAmelCase_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(snake_case__ ) processor.save_pretrained(snake_case__ ) UpperCAmelCase_ = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def lowerCAmelCase__ ( self : Any ) ->Tuple: with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) copyfile(snake_case__ , os.path.join(snake_case__ , '''vocab.json''' ) ) UpperCAmelCase_ = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaFeatureExtractor() UpperCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) UpperCAmelCase_ = WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in tokenizer with open(os.path.join(snake_case__ , snake_case__ ) , '''r''' ) as f: UpperCAmelCase_ = json.load(snake_case__ ) config_dict.pop('''processor_class''' ) with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' ) as f: f.write(json.dumps(snake_case__ ) ) UpperCAmelCase_ = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def lowerCAmelCase__ ( self : Any ) ->Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaFeatureExtractor() 
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) UpperCAmelCase_ = WavaVecaProcessor(snake_case__ , snake_case__ ) # save in new folder processor.save_pretrained(snake_case__ ) # drop `processor_class` in feature extractor with open(os.path.join(snake_case__ , snake_case__ ) , '''r''' ) as f: UpperCAmelCase_ = json.load(snake_case__ ) config_dict.pop('''processor_class''' ) with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' ) as f: f.write(json.dumps(snake_case__ ) ) UpperCAmelCase_ = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def lowerCAmelCase__ ( self : str ) ->List[str]: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(snake_case__ ) # copy relevant files copyfile(snake_case__ , os.path.join(snake_case__ , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' ) as f: f.write('''{}''' ) UpperCAmelCase_ = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def lowerCAmelCase__ ( self : Tuple ) ->int: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case__ ) UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case__ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) UpperCAmelCase_ = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) UpperCAmelCase_ = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case__ , use_fast=snake_case__ ) UpperCAmelCase_ = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]: try: AutoConfig.register('''custom''' , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoProcessor.register(snake_case__ , snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = os.path.join(snake_case__ , '''vocab.txt''' ) with open(snake_case__ , 
'''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) UpperCAmelCase_ = CustomTokenizer(snake_case__ ) UpperCAmelCase_ = CustomProcessor(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(snake_case__ ) UpperCAmelCase_ = AutoProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple: class lowerCamelCase ( __a ): '''simple docstring''' lowerCAmelCase__ = False class lowerCamelCase ( __a ): '''simple docstring''' lowerCAmelCase__ = False class lowerCamelCase ( __a ): '''simple docstring''' lowerCAmelCase__ = '''AutoFeatureExtractor''' lowerCAmelCase__ = '''AutoTokenizer''' lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , snake_case__ ) AutoFeatureExtractor.register(snake_case__ , snake_case__ ) AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ ) AutoProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local classes. UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=snake_case__ ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]: UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def lowerCAmelCase__ ( self : Optional[int] ) ->str: UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def lowerCAmelCase__ ( cls : Optional[int] ) ->Tuple: UpperCAmelCase_ = TOKEN HfFolder.save_token(snake_case__ ) @classmethod def lowerCAmelCase__ ( cls : Union[str, Any] ) ->Union[str, Any]: try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]: UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , '''test-processor''' ) , push_to_hub=snake_case__ , use_auth_token=self._token ) UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCAmelCase__ ( self : Optional[int] ) ->Dict: UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(snake_case__ , '''test-processor-org''' ) , push_to_hub=snake_case__ , use_auth_token=self._token , organization='''valid_org''' , ) UpperCAmelCase_ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(snake_case__ , getattr(new_processor.feature_extractor , snake_case__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCAmelCase__ ( self : Tuple ) ->int: CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(snake_case__ ) with 
tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = os.path.join(snake_case__ , '''vocab.txt''' ) with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) UpperCAmelCase_ = CustomTokenizer(snake_case__ ) UpperCAmelCase_ = CustomProcessor(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token ) UpperCAmelCase_ = Repository(snake_case__ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token ) processor.save_pretrained(snake_case__ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(snake_case__ , '''tokenizer_config.json''' ) ) as f: UpperCAmelCase_ = json.load(snake_case__ ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(snake_case__ , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(snake_case__ , '''custom_processing.py''' ) ) ) repo.push_to_hub() UpperCAmelCase_ = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=snake_case__ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
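A minimal illustration of the remote-code resolution exercised above, assuming access to the public hf-internal-testing/test_dynamic_processor fixture repo:

from transformers import AutoProcessor

# Checkpoints that ship custom processing code require explicit opt-in:
processor = AutoProcessor.from_pretrained(
    "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
)
print(type(processor).__name__)  # "NewProcessor", per the test expectations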
705
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Union[str, Any]=400 , UpperCAmelCase__ : List[Any]=3 , ) ->Dict: UpperCAmelCase_ = parent UpperCAmelCase_ = do_resize UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288} UpperCAmelCase_ = size_divisor UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = do_center_crop UpperCAmelCase_ = image_mean UpperCAmelCase_ = image_std UpperCAmelCase_ = do_pad UpperCAmelCase_ = batch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = min_resolution UpperCAmelCase_ = max_resolution def lowerCAmelCase__ ( self : Tuple ) ->List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=False ) ->Any: if not batched: UpperCAmelCase_ = self.size['''shortest_edge'''] UpperCAmelCase_ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ = image.size else: UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2] UpperCAmelCase_ = size / min(UpperCAmelCase__ , UpperCAmelCase__ ) if h < w: UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w else: UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size UpperCAmelCase_ = int((1333 / 800) * size ) if max(UpperCAmelCase__ , UpperCAmelCase__ ) > max_size: UpperCAmelCase_ = max_size / max(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = newh * scale UpperCAmelCase_ = neww * scale UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 ) UpperCAmelCase_ , UpperCAmelCase_ = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: UpperCAmelCase_ = [] for image in image_inputs: UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0] UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = BridgeTowerImageProcessor if 
is_vision_available() else None def lowerCAmelCase__ ( self : Optional[int] ) ->str: UpperCAmelCase_ = BridgeTowerImageProcessingTester(self ) @property def lowerCAmelCase__ ( self : List[str] ) ->Dict: return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self : Optional[int] ) ->int: UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''size_divisor''' ) ) def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]: pass def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]: # Initialize image processor UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self : Any ) ->Optional[int]: # Initialize image processor UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self : int ) ->List[str]: # Initialize image processor UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input 
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
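A worked example of the expected-size computation in get_expected_values above, with numbers chosen so the 1333/800 max-size clamp does not trigger:

size, divisor = 288, 32          # shortest edge and size_divisor from the tester
h, w = 300, 400                  # h < w, so the height is pinned to `size`
scale = size / min(h, w)         # 0.96
newh, neww = size, scale * w     # 288, 384.0
newh = int(newh + 0.5) // divisor * divisor  # 288
neww = int(neww + 0.5) // divisor * divisor  # 384
print(newh, neww)                # 288 384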
43
0
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''

    model_input_names = ['''input_values''', '''attention_mask''']

    def __init__( self , feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=False , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , frame_signal_scale=1.0 , fmin=80 , fmax=7600 , mel_floor=1e-10 , reduction_factor=2 , return_attention_mask=True , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size , name=self.win_function , periodic=True )
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , FutureWarning , )
        if reduction_factor != 2.0:
            warnings.warn(
                '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , FutureWarning , )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value=0.0 ):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values

    def _extract_mel_features( self , one_waveform , ):
        log_mel_spec = spectrogram(
            one_waveform , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
        return log_mel_spec.T

    def __call__( self , audio=None , audio_target=None , padding=False , max_length=None , truncation=False , pad_to_multiple_of=None , return_attention_mask=None , return_tensors=None , sampling_rate=None , **kwargs , ):
        if audio is None and audio_target is None:
            raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        if audio is not None:
            inputs = self._process_audio(
                audio , False , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target , True , padding , max_length , truncation , pad_to_multiple_of , return_attention_mask , return_tensors , **kwargs , )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get('''attention_mask''' )
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def _process_audio( self , speech , is_target=False , padding=False , max_length=None , truncation=False , pad_to_multiple_of=None , return_attention_mask=None , return_tensors=None , **kwargs , ):
        is_batched_numpy = isinstance(speech , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(speech , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            speech = [np.asarray(speech , dtype=np.float32 ) for speech in speech]
        elif not is_batched and not isinstance(speech , np.ndarray ):
            speech = np.asarray(speech , dtype=np.float32 )
        elif isinstance(speech , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
            speech = speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform ) for waveform in speech]
            encoded_inputs = BatchFeature({'''input_values''': features} )
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({'''input_values''': speech} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            padded_inputs["input_values"] = [np.asarray(array , dtype=np.float32 ) for array in input_values]
        elif (
            not isinstance(input_values , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.float64 )
        ):
            padded_inputs["input_values"] = [array.astype(np.float32 ) for array in input_values]
        elif isinstance(input_values , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
            padded_inputs["input_values"] = input_values.astype(np.float32 )
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs['''input_values'''] , attention_mask=attention_mask , padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs

    def to_dict( self ):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
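As a minimal standalone illustration of the per-utterance zero-mean, unit-variance normalization the extractor applies (plain NumPy; the helper name is hypothetical, not part of the row above):

import numpy as np


def normalize(vector):
    # Subtract the mean and divide by the epsilon-stabilized standard deviation,
    # as the extractor does for each utterance.
    return (vector - vector.mean()) / np.sqrt(vector.var() + 1e-7)


waveform = np.random.randn(16_000).astype(np.float32)
normed = normalize(waveform)
print(round(float(normed.mean()), 3), round(float(normed.std()), 3))  # ~0.0 ~1.0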
706
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Dict ) ->List[str]: UpperCAmelCase_ = tf.convert_to_tensor( [ [ 8.222_0991, # 3rd highest value; idx. 0 -0.562_0044, 5.2322_9752, 4.038_6393, -6.879_8378, -0.5478_5802, -3.201_2153, 2.9277_7176, 1.8817_1953, 7.3534_1276, # 5th highest value; idx. 9 8.4320_7833, # 2nd highest value; idx. 10 -9.8571_1836, -5.9620_9236, -1.1303_9161, -7.111_5294, -0.836_9633, -5.318_6408, 7.0642_7407, 0.8136_9344, -0.8202_3817, -5.917_9796, 0.5881_3443, -6.9977_8438, 4.7155_1189, -0.1877_1637, 7.4402_0759, # 4th highest value; idx. 25 9.3845_0987, # 1st highest value; idx. 26 2.1266_2941, -9.3256_2038, 2.3565_2522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5842_5518, 4.5313_9238, -5.5751_0464, -6.2803_0699, -7.1952_9503, -4.0212_2551, 1.3933_7037, -6.0670_7057, 1.5948_0517, -9.64_3119, 0.0390_7799, 0.6723_1762, -8.8820_6726, 6.2711_5922, # 4th highest value; idx. 13 2.2852_0723, 4.8276_7506, 4.3042_1368, 8.827_5313, # 2nd highest value; idx. 17 5.4402_9958, # 5th highest value; idx. 18 -4.473_5794, 7.3857_9536, # 3rd highest value; idx. 20 -2.9105_1663, 2.6194_6077, -2.567_4762, -9.4895_9302, -4.0292_2645, -1.3541_6918, 9.6770_2323, # 1st highest value; idx. 
27 -5.8947_8553, 1.8537_0467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) UpperCAmelCase_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above UpperCAmelCase_ = tf.convert_to_tensor( [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above UpperCAmelCase_ = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) UpperCAmelCase_ = output[output != -float('''inf''' )] UpperCAmelCase_ = tf.cast( tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @require_tf class lowerCamelCase ( unittest.TestCase , lowerCamelCase ): '''simple docstring''' if is_tf_available(): lowerCAmelCase__ = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]: # TF-only test: tf.saved_model export UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 2 UpperCAmelCase_ = 2 class lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : List[str] ) ->Dict: super(UpperCAmelCase__ , self ).__init__() UpperCAmelCase_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->int: UpperCAmelCase_ = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} UpperCAmelCase_ = [[2, 0], [102, 103]] UpperCAmelCase_ = [[1, 0], [1, 1]] UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} ) UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default'''] for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ): UpperCAmelCase_ = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences'''] UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]: # TF-only test: tf.saved_model export UpperCAmelCase_ = 
TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 1 UpperCAmelCase_ = 2 class lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : int ) ->List[str]: super(UpperCAmelCase__ , self ).__init__() UpperCAmelCase_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->int: UpperCAmelCase_ = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} UpperCAmelCase_ = [[2], [102, 103]] UpperCAmelCase_ = [[1], [1, 1]] UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} ) UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default'''] for input_row in range(len(UpperCAmelCase__ ) ): UpperCAmelCase_ = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences'''] UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow @require_tensorflow_text def lowerCAmelCase__ ( self : Optional[Any] ) ->int: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase__ ) class lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ) ->Any: super().__init__() UpperCAmelCase_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , '''spiece.model''' ) , '''rb''' ).read() ) UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]: UpperCAmelCase_ = self.tokenizer.tokenize(UpperCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = text.pad_model_inputs( UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) UpperCAmelCase_ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) return self.tokenizer.detokenize(UpperCAmelCase__ ) UpperCAmelCase_ = CompleteSentenceTransformer() UpperCAmelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) UpperCAmelCase_ = complete_model(UpperCAmelCase__ ) UpperCAmelCase_ = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ ) keras_model.save(UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple: # Has PT equivalent: this test relies on random sampling UpperCAmelCase_ = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } UpperCAmelCase_ = 14 UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = '''Hello, my dog 
is cute and''' UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ) UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) UpperCAmelCase_ = [638, 198] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: # Has PT equivalent: ample use of framework-specific code UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = '''Hugging Face is a technology company based in New York and Paris.''' UpperCAmelCase_ = bart_tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ).input_ids UpperCAmelCase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy() class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) ->List[str]: return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) ) class lowerCamelCase ( bart_model.model.encoder.__class__ ): '''simple docstring''' def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->Any: return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared ) UpperCAmelCase_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy() with self.assertRaises(UpperCAmelCase__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCAmelCase__ , foo='''bar''' )
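For intuition, a standalone NumPy sketch of the top-p ("nucleus") filtering idea that `tf_top_k_top_p_filtering` exercises in the test above; this is an assumption-laden simplification (no top-k, no min_tokens_to_keep), not the TF helper itself:

import numpy as np


def top_p_filter(logits, top_p):
    # Sort descending, keep the smallest prefix whose softmax mass reaches top_p,
    # and set everything else to -inf.
    order = np.argsort(logits)[::-1]
    probs = np.exp(logits[order] - logits[order].max())
    probs /= probs.sum()
    keep = np.searchsorted(np.cumsum(probs), top_p) + 1
    filtered = np.full_like(logits, -np.inf)
    filtered[order[:keep]] = logits[order[:keep]]
    return filtered


print(top_p_filter(np.array([2.0, 1.0, 0.1, -1.0]), top_p=0.9))
# [  2.    1.    0.1 -inf]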
43
0
'''simple docstring'''


def dodecahedron_surface_area(edge: float) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('''Length must be a positive.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    '''simple docstring'''
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('''Length must be a positive.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
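A quick check of the two closed forms above for a unit edge:

print(round(dodecahedron_surface_area(1), 4))  # 20.6457
print(round(dodecahedron_volume(1), 4))        # 7.6631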
707
'''simple docstring'''
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
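A second, hypothetical use of the same routine: the positive root of x**2 - 2 on [1, 2] is sqrt(2).

print(bisection(lambda x: x**2 - 2, 1, 2))  # ~1.4142135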
43
0
'''simple docstring'''


def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    '''simple docstring'''
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    '''simple docstring'''
    return round(float((moles * 0.0_821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    '''simple docstring'''
    return round(float((moles * 0.0_821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    '''simple docstring'''
    return round(float((pressure * volume) / (0.0_821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
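A worked instance of PV = nRT with R = 0.0821 L·atm/(mol·K): 3 mol at 300 K in 0.82 L exerts 3 * 0.0821 * 300 / 0.82 ≈ 90 atm, which the rounded helper above returns as an integer.

print(moles_to_pressure(0.82, 3, 300))  # 90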
708
'''simple docstring'''
import re


def split_input(str_: str) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , str_ )]


def to_simple_case(str_: str) -> str:
    '''simple docstring'''
    string_split = split_input(str_ )
    return "".join(
        [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''''''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ]
            )
        else:
            res_str = ''''''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    '''simple docstring'''
    return to_simple_case(text )


def to_camel_case(text: str) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '''_''' )


def to_kebab_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , '''-''' )


if __name__ == "__main__":
    __import__("doctest").testmod()
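Hypothetical usage of the converters above:

print(to_pascal_case("this is a string"))        # ThisIsAString
print(to_camel_case("this is a string"))         # thisIsAString
print(to_snake_case("This is a string", False))  # this_is_a_string
print(to_kebab_case("This is a string", True))   # THIS-IS-A-STRING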
43
0
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    '''simple docstring'''
    # encoder.embeddings tensors are excluded because the original checkpoint duplicates them.
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    '''simple docstring'''
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
        key = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
        key = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
        key = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
        key = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
        key = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
        key = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
        key = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
        key = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
        key = key.replace('''image_encoder.module''' , '''flava.image_model''' )
        key = key.replace('''text_encoder.module''' , '''flava.text_model''' )
        key = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
        key = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
        key = key.replace('''text_projection''' , '''flava.text_projection''' )
        key = key.replace('''image_projection''' , '''flava.image_projection''' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        # codebook weights live under the image_codebook prefix in the HF model
        upgrade[f"""image_codebook.{key}"""] = value
    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    '''simple docstring'''
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location='''cpu''' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='''cpu''' )
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
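A toy illustration of the parameter-count check performed above; the keys and shapes here are hypothetical:

import torch

state = {
    "encoder.embeddings.weight": torch.ones(2, 2),  # excluded from the count
    "head.weight": torch.full((3,), 2.0),           # contributes 3 * 2.0 = 6.0
}
total = sum(p.float().sum() if "encoder.embeddings" not in k else 0 for k, p in state.items())
print(total)  # tensor(6.)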
709
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowercase__ : Optional[Any] = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: UpperCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: UpperCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase : '''simple docstring''' def __init__( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Optional[int]: UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = eos_token_id UpperCAmelCase_ = pad_token_id UpperCAmelCase_ = bos_token_id UpperCAmelCase_ = initializer_range def lowerCAmelCase__ ( self : int ) ->Any: UpperCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) UpperCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 ) UpperCAmelCase_ = 
BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , ) UpperCAmelCase_ = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return config, inputs_dict def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]: UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs() return config, inputs_dict def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) ->Tuple: UpperCAmelCase_ = 20 UpperCAmelCase_ = model_class_name(UpperCAmelCase__ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase_ , UpperCAmelCase_ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->Union[str, Any]: UpperCAmelCase_ = 20 UpperCAmelCase_ = model_class_name(UpperCAmelCase__ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase_ , UpperCAmelCase_ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase_ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * 
[[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ ) UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = 99 def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]: UpperCAmelCase_ = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) UpperCAmelCase_ = input_ids.shape[0] UpperCAmelCase_ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCAmelCase__ ( self : Any ) ->str: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_config_and_data() UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ ) UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ ) UpperCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : str ) ->int: UpperCAmelCase_ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ ) UpperCAmelCase_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) UpperCAmelCase_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ ) UpperCAmelCase_ = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]: UpperCAmelCase_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 ) UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum() UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase ( lowerCamelCase , unittest.TestCase , lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else 
() def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]: UpperCAmelCase_ = FlaxBlenderbotModelTester(self ) def lowerCAmelCase__ ( self : str ) ->Tuple: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Tuple ) ->str: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Dict ) ->Tuple: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = model_class(UpperCAmelCase__ ) @jax.jit def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] ): return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase__ ( self : str ) ->str: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = model_class(UpperCAmelCase__ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) UpperCAmelCase_ = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): return model.decode( decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase__ ( self : int ) ->int: for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids UpperCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id UpperCAmelCase_ = model(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]: UpperCAmelCase_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, 
'''max_length''': 25} UpperCAmelCase_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCAmelCase__ ) UpperCAmelCase_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) UpperCAmelCase_ = ['''Sam'''] UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''jax''' ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = '''Sam is a great name. It means "sun" in Gaelic.''' UpperCAmelCase_ = tokenizer.batch_decode(UpperCAmelCase__ , **UpperCAmelCase__ ) assert generated_txt[0].strip() == tgt_text
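For intuition, a standalone NumPy sketch of what `shift_tokens_right` does to build the decoder inputs used above (a simplification, not the Flax implementation):

import numpy as np


def shift_right(input_ids, pad_token_id, decoder_start_token_id):
    # Prepend the decoder start token, drop the last position, and replace any
    # -100 label-masking sentinels with the pad token.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)


print(shift_right(np.array([[5, 6, 2]]), pad_token_id=1, decoder_start_token_id=2))
# [[2 5 6]]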
43
0
'''simple docstring'''
import argparse

from tax import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
    if config.model_type == "t5":
        encoder_attn_name = 'SelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
            ''' attribute with a value from [\'local\', \'transient-global].''' )
    # Encoder
    for layer_index in range(config.num_layers ):
        layer_name = f"""layers_{str(layer_index )}"""
        # Self-Attention
        tax_attention_key = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        tax_attention_out = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        tax_attention_query = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        tax_attention_value = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        # Layer Normalization
        tax_attention_layer_norm = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index )]['layer']
        UpperCAmelCase_ = tax_attention_key
        UpperCAmelCase_ = tax_attention_out
        UpperCAmelCase_ = tax_attention_query
        UpperCAmelCase_ = tax_attention_value
        UpperCAmelCase_ = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            UpperCAmelCase_ = tax_global_layer_norm
        if split_mlp_wi:
            UpperCAmelCase_ = tax_mlp_wi_0
            UpperCAmelCase_ = tax_mlp_wi_1
        else:
            UpperCAmelCase_ = tax_mlp_wi
        UpperCAmelCase_ = tax_mlp_wo
        UpperCAmelCase_ = tax_mlp_layer_norm
        UpperCAmelCase_ = flax_model_encoder_layer_block
    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    UpperCAmelCase_ = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        UpperCAmelCase_ = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model['target']['encoder']['encoder_norm']['scale']
    UpperCAmelCase_ = tax_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers ):
        layer_name = f"""layers_{str(layer_index )}"""
        # Self-Attention
        tax_attention_key = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        tax_attention_out = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        tax_attention_query = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        tax_attention_value = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        tax_enc_dec_attention_key = tax_enc_dec_attention_module['key']['kernel']
        tax_enc_dec_attention_out = tax_enc_dec_attention_module['out']['kernel']
        tax_enc_dec_attention_query = tax_enc_dec_attention_module['query']['kernel']
        tax_enc_dec_attention_value = tax_enc_dec_attention_module['value']['kernel']
        # Layer Normalization
        tax_cross_layer_norm = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            tax_mlp_wi_1 = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            tax_mlp_wi = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        tax_mlp_wo = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index )]['layer']
        UpperCAmelCase_ = tax_attention_key
        UpperCAmelCase_ = tax_attention_out
        UpperCAmelCase_ = tax_attention_query
        UpperCAmelCase_ = tax_attention_value
        UpperCAmelCase_ = tax_pre_attention_layer_norm
        UpperCAmelCase_ = tax_enc_dec_attention_key
        UpperCAmelCase_ = tax_enc_dec_attention_out
        UpperCAmelCase_ = tax_enc_dec_attention_query
        UpperCAmelCase_ = tax_enc_dec_attention_value
        UpperCAmelCase_ = tax_cross_layer_norm
        if split_mlp_wi:
            UpperCAmelCase_ = tax_mlp_wi_0
            UpperCAmelCase_ = tax_mlp_wi_1
        else:
            UpperCAmelCase_ = tax_mlp_wi
        UpperCAmelCase_ = tax_mlp_wo
        UpperCAmelCase_ = tax_mlp_layer_norm
        UpperCAmelCase_ = flax_model_decoder_layer_block
    # Decoder Normalization
    tax_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
    UpperCAmelCase_ = tax_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    UpperCAmelCase_ = tax_decoder_rel_embedding
    # Token Embeddings
    tax_token_embeddings = tax_model['target']['token_embedder']['embedding']
    UpperCAmelCase_ = tax_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        UpperCAmelCase_ = tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
    print('''T5X Model was successfully converted!''' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
710
'''simple docstring'''
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' ,
    [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] ,
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' ,
    [
        ('''paws''', None, ValueError),
    ] ,
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    '''path, expected''' ,
    [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] ,
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' ,
    [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] ,
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' ,
    [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] ,
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' ,
    [
        ('''paws''', None, ValueError),
    ] ,
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
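Hypothetical interactive use of the API exercised above (requires network access to the Hub):

from datasets import get_dataset_split_names

print(get_dataset_split_names("squad", config_name="plain_text"))
# ['train', 'validation']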
43
0
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''yolos'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation( self ) ->float:
        return 1e-4

    @property
    def default_onnx_opset( self ) ->int:
        return 12
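A minimal sketch of instantiating this configuration through the public transformers API:

from transformers import YolosConfig

config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
print(config.model_type, config.hidden_size, config.num_detection_tokens)  # yolos 768 100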
711
'''simple docstring''' import collections import os import re from pathlib import Path lowercase__ : List[Any] = "src/transformers" # Matches is_xxx_available() lowercase__ : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} lowercase__ : Any = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase__ : Union[str, Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") lowercase__ : List[str] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase__ : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", lowercase__ : List[Any] = re.compile(R"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], lowercase__ : Optional[Any] = re.compile(R"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo lowercase__ : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: lowercase__ : int = re.compile(R"^\s*try:") # Catches a line with else: lowercase__ : Any = re.compile(R"^\s*else:") def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ): '''simple docstring''' if _re_test_backend.search(_UpperCamelCase ) is None: return None UpperCAmelCase_ = [b[0] for b in _re_backend.findall(_UpperCamelCase )] backends.sort() return "_and_".join(_UpperCamelCase ) def __lowerCamelCase ( _UpperCamelCase : int ): '''simple docstring''' with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase_ = f.readlines() UpperCAmelCase_ = 0 while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure UpperCAmelCase_ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCAmelCase_ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_UpperCamelCase ): UpperCAmelCase_ = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0] UpperCAmelCase_ = re.findall(R'''\[([^\]]+)\]''' , _UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCAmelCase_ = _re_import_struct_key_value.search(_UpperCamelCase ) if single_line_import_search is not None: UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCAmelCase_ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
UpperCAmelCase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCAmelCase_ = lines[line_index] if _re_import_struct_add_one.search(_UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None: UpperCAmelCase_ = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(''', ''' ) UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif _re_between_brackets.search(_UpperCamelCase ) is not None: UpperCAmelCase_ = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(''', ''' ) UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif _re_quote_object.search(_UpperCamelCase ) is not None: objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 UpperCAmelCase_ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCAmelCase_ = [] while ( line_index < len(_UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCAmelCase_ = lines[line_index] UpperCAmelCase_ = _re_import.search(_UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCAmelCase_ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCAmelCase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCAmelCase_ = lines[line_index] UpperCAmelCase_ = _re_import.search(_UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 UpperCAmelCase_ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ): '''simple docstring''' def find_duplicates(_UpperCamelCase : Tuple ): return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCAmelCase_ = [] for key in import_dict_objects.keys(): UpperCAmelCase_ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) UpperCAmelCase_ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCAmelCase_ = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = [] for root, _, files in os.walk(_UpperCamelCase ): if "__init__.py" in files: UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''__init__.py''' ) UpperCAmelCase_ = parse_init(_UpperCamelCase ) if objects is not None: UpperCAmelCase_ = analyze_results(*_UpperCamelCase ) if len(_UpperCamelCase ) > 0: UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(_UpperCamelCase ) ) if len(_UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(_UpperCamelCase ) ) def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = [] for path, directories, files in os.walk(_UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCAmelCase_ = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) ) UpperCAmelCase_ = short_path.replace(os.path.sep , '''.''' ) submodules.append(_UpperCamelCase ) for fname in files: if fname == "__init__.py": continue UpperCAmelCase_ = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) ) UpperCAmelCase_ = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_UpperCamelCase ) return submodules lowercase__ : Union[str, Any] = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def __lowerCamelCase ( ): '''simple docstring''' from transformers.utils import direct_transformers_import UpperCAmelCase_ = direct_transformers_import(_UpperCamelCase ) UpperCAmelCase_ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(_UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: UpperCAmelCase_ = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _UpperCamelCase ) ) ) UpperCAmelCase_ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_UpperCamelCase ) > 0: UpperCAmelCase_ = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
43
0
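# A minimal, self-contained sketch (not part of the dataset) of what the
# `find_backend` helper in the style_context cell above is meant to do: detect
# `if not is_xxx_available():` guards and name the backend. The regexes mirror
# the ones defined in that cell; the sample lines below are made up.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    # findall returns (backend, "") tuples because of the trailing empty group
    return "_and_".join(sorted(b[0] for b in _re_backend.findall(line)))

assert find_backend("if not is_torch_available():") == "torch"
assert find_backend("import torch") is None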
'''simple docstring''' from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowercase__ : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Optional[int] ): '''simple docstring''' return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] ): '''simple docstring''' UpperCAmelCase_ = to_pil_image(lowerCamelCase_ ) UpperCAmelCase_ = pil_image.size UpperCAmelCase_ = pytesseract.image_to_data(lowerCamelCase_ , lang=lowerCamelCase_ , output_type='''dict''' , config=lowerCamelCase_ ) UpperCAmelCase_ = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""] # filter empty words and corresponding coordinates UpperCAmelCase_ = [idx for idx, word in enumerate(lowerCamelCase_ ) if not word.strip()] UpperCAmelCase_ = [word for idx, word in enumerate(lowerCamelCase_ ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(lowerCamelCase_ ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(lowerCamelCase_ ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(lowerCamelCase_ ) if idx not in irrelevant_indices] UpperCAmelCase_ = [coord for idx, coord in enumerate(lowerCamelCase_ ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCAmelCase_ = [] for x, y, w, h in zip(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): UpperCAmelCase_ = [x, y, x + w, y + h] actual_boxes.append(lowerCamelCase_ ) # finally, normalize the bounding boxes UpperCAmelCase_ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ), "Not as many words as there are bounding boxes" return words, normalized_boxes class lowerCamelCase ( lowercase_ ): '''simple docstring''' lowerCAmelCase__ = ['''pixel_values'''] def __init__( self : Optional[Any] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : float = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "" , **UpperCAmelCase__ : int , ) ->List[str]: super().__init__(**lowerCamelCase_ ) UpperCAmelCase_ = size if size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase_ = get_size_dict(lowerCamelCase_ ) UpperCAmelCase_ = do_resize 
UpperCAmelCase_ = size UpperCAmelCase_ = resample UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_value UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD UpperCAmelCase_ = apply_ocr UpperCAmelCase_ = ocr_lang UpperCAmelCase_ = tesseract_config def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ) ->List[Any]: UpperCAmelCase_ = get_size_dict(lowerCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""" ) UpperCAmelCase_ = (size["""height"""], size["""width"""]) return resize(lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ ) def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->Optional[int]: return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ ) def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, Iterable[float]] , UpperCAmelCase__ : Union[float, Iterable[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->int: return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_ ) def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : List[str] , ) ->str: UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(lowerCamelCase_ ) UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCAmelCase_ = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCAmelCase_ = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCAmelCase_ = make_list_of_images(lowerCamelCase_ ) if not valid_images(lowerCamelCase_ ): raise ValueError( '''Invalid image 
type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. UpperCAmelCase_ = [to_numpy_array(lowerCamelCase_ ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) UpperCAmelCase_ = [] UpperCAmelCase_ = [] for image in images: UpperCAmelCase_ = apply_tesseract(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) words_batch.append(lowerCamelCase_ ) boxes_batch.append(lowerCamelCase_ ) if do_resize: UpperCAmelCase_ = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_ ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_ ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_ ) for image in images] UpperCAmelCase_ = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowerCamelCase_ ) if apply_ocr: UpperCAmelCase_ = words_batch UpperCAmelCase_ = boxes_batch return data
712
'''simple docstring''' from __future__ import annotations def get_valid_pos( position : tuple[int, int] , n : int ): '''simple docstring''' y, x = position positions = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] permissible_positions = [] for position in positions: y_test, x_test = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(position ) return permissible_positions def is_complete( board : list[list[int]] ): '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def open_knight_tour_helper( board : list[list[int]] , pos : tuple[int, int] , curr : int ): '''simple docstring''' if is_complete(board ): return True for position in get_valid_pos(pos , len(board ) ): y, x = position if board[y][x] == 0: board[y][x] = curr + 1 if open_knight_tour_helper(board , position , curr + 1 ): return True board[y][x] = 0 return False def open_knight_tour( n : int ): '''simple docstring''' board = [[0 for i in range(n )] for j in range(n )] for i in range(n ): for j in range(n ): board[i][j] = 1 if open_knight_tour_helper(board , (i, j) , 1 ): return board board[i][j] = 0 msg = F"""Open Knight Tour cannot be performed on a board of size {n}""" raise ValueError(msg ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
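# Hedged worked example (not part of the dataset) of the `normalize_box`
# arithmetic in the image-processor cell above: pixel boxes are rescaled to a
# 0-1000 coordinate system, as LayoutLM-style models expect. The width, height
# and box values below are made up for illustration.
width, height = 200, 100
box = (20, 10, 100, 50)  # (left, top, right, bottom) in pixels
normalized = [
    int(1000 * (box[0] / width)),   # 100
    int(1000 * (box[1] / height)),  # 100
    int(1000 * (box[2] / width)),   # 500
    int(1000 * (box[3] / height)),  # 500
]
assert normalized == [100, 100, 500, 500]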
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=True ) class lowerCamelCase ( TaskTemplate ): '''simple docstring''' task: str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) input_schema: ClassVar[Features] = Features({'''audio''': Audio()} ) label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} ) audio_column: str = "audio" label_column: str = "labels" def align_with_features( self , features ): if self.label_column not in features: raise ValueError(f"""Column {self.label_column} is not present in features.""" ) if not isinstance(features[self.label_column] , ClassLabel ): raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" ) task_template = copy.deepcopy(self ) label_schema = self.label_schema.copy() label_schema['''labels'''] = features[self.label_column] task_template.__dict__['''label_schema'''] = label_schema return task_template @property def column_mapping( self ) ->Dict[str, str]: return { self.audio_column: "audio", self.label_column: "labels", }
713
'''simple docstring''' from __future__ import annotations from typing import TypedDict class lowerCamelCase ( TypedDict ): '''simple docstring''' bwt_string: str idx_original_string: int def all_rotations( s : str ) -> list[str]: '''simple docstring''' if not isinstance(s , str ): raise TypeError('''The parameter s type must be str.''' ) return [s[i:] + s[:i] for i in range(len(s ) )] def bwt_transform( s : str ): '''simple docstring''' if not isinstance(s , str ): raise TypeError('''The parameter s type must be str.''' ) if not s: raise ValueError('''The parameter s must not be empty.''' ) rotations = all_rotations(s ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation response = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(s ), } return response def reverse_bwt( bwt_string : str , idx_original_string : int ) -> str: '''simple docstring''' if not isinstance(bwt_string , str ): raise TypeError('''The parameter bwt_string type must be str.''' ) if not bwt_string: raise ValueError('''The parameter bwt_string must not be empty.''' ) try: idx_original_string = int(idx_original_string ) except ValueError: raise TypeError( '''The parameter idx_original_string type must be int or''' ''' castable to int.''' ) if idx_original_string < 0: raise ValueError('''The parameter idx_original_string must not be lower than 0.''' ) if idx_original_string >= len(bwt_string ): raise ValueError( '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' ) ordered_rotations = [''''''] * len(bwt_string ) for _ in range(len(bwt_string ) ): for i in range(len(bwt_string ) ): ordered_rotations[i] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": entry_msg = "Provide a string that I will generate its BWT transform: " s = input(entry_msg).strip() result = bwt_transform(s) print( F'''Burrows Wheeler transform for string \'{s}\' results ''' F'''in \'{result['bwt_string']}\'''' ) original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"]) print( F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' ''' F'''we get original string \'{original_string}\'''' )
43
0
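# Hedged round-trip check (not part of the dataset) for the Burrows-Wheeler
# cell above; it assumes `bwt_transform` and `reverse_bwt` from that file are
# in scope. "banana" has six rotations, sorted they end in "nnbaaa", and the
# original string sits at index 3 of the sorted rotation list.
result = bwt_transform("banana")
assert result["bwt_string"] == "nnbaaa"
assert result["idx_original_string"] == 3
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "banana"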
'''simple docstring''' import logging from dataclasses import dataclass, field from typing import Optional from seq2seq_trainer import arg_to_scheduler from transformers import TrainingArguments lowercase__ : Optional[Any] = logging.getLogger(__name__) @dataclass class lowerCamelCase ( TrainingArguments ): '''simple docstring''' label_smoothing: float = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) sortish_sampler: bool = field(default=False , metadata={'''help''': '''Whether to use SortishSampler or not.'''} ) predict_with_generate: bool = field( default=False , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) adafactor: bool = field(default=False , metadata={'''help''': '''whether to use adafactor'''} ) encoder_layerdrop: Optional[float] = field( default=None , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) decoder_layerdrop: Optional[float] = field( default=None , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) dropout: Optional[float] = field(default=None , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) attention_dropout: Optional[float] = field( default=None , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) lr_scheduler: Optional[str] = field( default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
714
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase__ : Union[str, Any] = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Union[str, Any] = ["MobileViTFeatureExtractor"] lowercase__ : List[Any] = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys lowercase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
0
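# Hedged sketch (not part of the dataset) of the `_LazyModule` pattern used by
# the `__init__` cell above: importing the package only records names in
# `_import_structure`; the heavy submodule is resolved on first attribute
# access. Requires `transformers` to be installed to actually run.
import importlib

module = importlib.import_module("transformers.models.mobilevit")
config_cls = getattr(module, "MobileViTConfig")  # first access triggers the real import
print(config_cls.__name__)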
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging lowercase__ : int = logging.get_logger(__name__) logging.set_verbosity_info() def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ): '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(a__ ) UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained( a__ , output_loading_info=a__ ) else: UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(a__ ) UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained( a__ , output_loading_info=a__ ) UpperCAmelCase_ = ['''key_proj''', '''value_proj''', '''query_proj'''] UpperCAmelCase_ = { '''self_attn''': '''ngram_self_attn''', '''cross_attn''': '''encoder_attn''', '''cross_attn_layer_norm''': '''encoder_attn_layer_norm''', '''feed_forward_layer_norm''': '''final_layer_norm''', '''feed_forward''': '''''', '''intermediate''': '''fc1''', '''output''': '''fc2''', '''key_proj''': '''k_proj''', '''query_proj''': '''q_proj''', '''value_proj''': '''v_proj''', '''word_embeddings''': '''embed_tokens''', '''embeddings_layer_norm''': '''emb_layer_norm''', '''relative_pos_embeddings''': '''relative_linear''', '''ngram_embeddings''': '''ngram_input_embed''', '''position_embeddings''': '''embed_positions''', } for key in loading_info["missing_keys"]: UpperCAmelCase_ = key.split('''.''' ) if attributes[0] == "lm_head": UpperCAmelCase_ = prophet UpperCAmelCase_ = prophet_old else: UpperCAmelCase_ = prophet.prophetnet UpperCAmelCase_ = prophet_old.model UpperCAmelCase_ = False for attribute in attributes: if attribute in mapping: UpperCAmelCase_ = mapping[attribute] if not hasattr(a__ , a__ ) and len(a__ ) > 0: UpperCAmelCase_ = attribute elif hasattr(a__ , a__ ): UpperCAmelCase_ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" UpperCAmelCase_ = old_model.weight logger.info(F"""{attribute} is initialized.""" ) UpperCAmelCase_ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
UpperCAmelCase_ = old_model.bias logger.info(F"""{attribute} is initialized""" ) UpperCAmelCase_ = True break elif attribute in special_keys and hasattr(a__ , '''in_proj_weight''' ): UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3 UpperCAmelCase_ = getattr(a__ , a__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) UpperCAmelCase_ = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] ) UpperCAmelCase_ = True break if attribute.isdigit(): UpperCAmelCase_ = model[int(a__ )] UpperCAmelCase_ = old_model[int(a__ )] else: UpperCAmelCase_ = getattr(a__ , a__ ) if old_attribute == "": UpperCAmelCase_ = old_model else: if not hasattr(a__ , a__ ): raise ValueError(F"""{old_model} does not have {old_attribute}""" ) UpperCAmelCase_ = getattr(a__ , a__ ) if not is_key_init: raise ValueError(F"""{key} was not correctly initialized!""" ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) prophet.save_pretrained(a__ ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowercase__ : Tuple = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
715
'''simple docstring''' INSTALL_CONTENT = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment out the command above and uncomment the following one.\n" lowercase__ : str = [{"type": "code", "content": INSTALL_CONTENT}] lowercase__ : Any = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
43
0
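# Hedged sketch (not part of the dataset) of the fused-projection split that
# the conversion script above performs: fairseq stores q/k/v as one
# `in_proj_weight`, and the new checkpoint expects three separate projections.
# The embedding dimension below is illustrative.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
q_w = in_proj_weight[:embed_dim, :]
k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_w = in_proj_weight[2 * embed_dim :, :]
assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)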
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowercase__ : Union[str, Any] = { "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = ["LlamaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = ["LlamaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = [ "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel", "LlamaForSequenceClassification", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
716
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase__ : Optional[Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : List[str] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
0
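# Hedged, dependency-free sketch (not part of the dataset) of the
# try/except/else guard used by the two init files above: the probe raises
# when a backend is missing, so the else branch that would register the heavy
# symbols simply never runs. Stub names below are made up.
class OptionalDependencyNotAvailable(Exception):
    pass

def is_torch_available():
    return False  # pretend the backend is missing

_import_structure = {"configuration": ["Config"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling"] = ["Model"]
assert "modeling" not in _import_structure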
'''simple docstring''' from __future__ import annotations from typing import Any def generate_all_subsequences( sequence : list[Any] ) -> None: '''simple docstring''' create_state_space_tree(sequence , [] , 0 ) def create_state_space_tree( sequence : list[Any] , current_subsequence : list[Any] , index : int ) -> None: '''simple docstring''' if index == len(sequence ): print(current_subsequence ) return create_state_space_tree(sequence , current_subsequence , index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(sequence , current_subsequence , index + 1 ) current_subsequence.pop() if __name__ == "__main__": seq: list[Any] = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(["A", "B", "C"]) generate_all_subsequences(seq)
717
'''simple docstring''' from heapq import heappop, heappush import numpy as np def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : bool , ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ = grid.shape UpperCAmelCase_ = [-1, 1, 0, 0] UpperCAmelCase_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] UpperCAmelCase_ , UpperCAmelCase_ = [(0, source)], set() UpperCAmelCase_ = np.full((rows, cols) , np.inf ) UpperCAmelCase_ = 0 UpperCAmelCase_ = np.empty((rows, cols) , dtype=_UpperCamelCase ) UpperCAmelCase_ = None while queue: ((UpperCAmelCase_) , (UpperCAmelCase_)) = heappop(_UpperCamelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: UpperCAmelCase_ = [] while (x, y) != source: path.append((x, y) ) UpperCAmelCase_ , UpperCAmelCase_ = predecessors[x, y] path.append(_UpperCamelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(_UpperCamelCase ) ): UpperCAmelCase_ , UpperCAmelCase_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: UpperCAmelCase_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(_UpperCamelCase , (dist + 1, (nx, ny)) ) UpperCAmelCase_ = dist + 1 UpperCAmelCase_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
43
0
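# Hedged usage sketch (not part of the dataset) for the subsequence generator
# above, using the names as restored in that cell. The recursion excludes an
# element before including it, so for [1, 2] the printed order is:
# [], [2], [1], [1, 2].
generate_all_subsequences([1, 2])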
'''simple docstring''' def is_pentagonal( n : int ) -> bool: '''simple docstring''' root = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def solution( limit : int = 5000 ) -> int: '''simple docstring''' pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )] for i, pentagonal_i in enumerate(pentagonal_nums ): for j in range(i , len(pentagonal_nums ) ): pentagonal_j = pentagonal_nums[j] a = pentagonal_i + pentagonal_j b = pentagonal_j - pentagonal_i if is_pentagonal(a ) and is_pentagonal(b ): return b return -1 if __name__ == "__main__": print(F'''{solution() = }''')
718
'''simple docstring''' import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = XLMTokenizer lowerCAmelCase__ = False def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]: UpperCAmelCase_ = '''lower newer''' UpperCAmelCase_ = '''lower newer''' return input_text, output_text def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase_ = '''lower''' UpperCAmelCase_ = ['''low''', '''er</w>'''] UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = tokens + ['''<unk>'''] UpperCAmelCase_ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) @slow def lowerCAmelCase__ ( self : Any ) ->str: UpperCAmelCase_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' ) UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
43
0
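# Hedged worked check (not part of the dataset) of the inverse
# pentagonal-number formula used above: P_5 = 5 * (3 * 5 - 1) // 2 = 35, and
# n = (1 + sqrt(1 + 24 * 35)) / 6 = 5, an integer, so 35 is pentagonal.
root = (1 + 24 * 35) ** 0.5  # sqrt(841) = 29.0
assert (1 + root) / 6 == 5.0
# with the restored names from the cell above in scope:
assert is_pentagonal(35) and not is_pentagonal(36)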
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowercase__ : Dict = "platform" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase : '''simple docstring''' lowerCAmelCase__ = PegasusConfig lowerCAmelCase__ = {} lowerCAmelCase__ = '''gelu''' def __init__( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Tuple=99 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Any=37 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Any=20 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Tuple=0 , ) ->Optional[Any]: UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = eos_token_id UpperCAmelCase_ = pad_token_id UpperCAmelCase_ = bos_token_id def lowerCAmelCase__ ( self : List[str] ) ->int: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) UpperCAmelCase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase_ = np.concatenate([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase_ = prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return config, inputs_dict def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) ->Tuple: UpperCAmelCase_ = 20 UpperCAmelCase_ = model_class_name(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase_ , UpperCAmelCase_ = ( 
inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) UpperCAmelCase_ = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str ) ->Optional[int]: UpperCAmelCase_ = 20 UpperCAmelCase_ = model_class_name(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase_ , UpperCAmelCase_ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase_ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , decoder_position_ids=SCREAMING_SNAKE_CASE_ , ) UpperCAmelCase_ = model.decode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase_ = np.not_equal(__a , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: UpperCAmelCase_ = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": 
attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase ( __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) lowerCAmelCase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]: UpperCAmelCase_ = FlaxPegasusModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]: self.config_tester.run_common_tests() def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def encode_jitted(UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : List[Any] ): return model.encode(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ = encode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ = encode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase__ ( self : Optional[Any] ) ->str: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = model_class(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) UpperCAmelCase_ = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ): return model.decode( decoder_input_ids=SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , encoder_outputs=SCREAMING_SNAKE_CASE_ , ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ = decode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ = decode_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() 
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]: for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = np.ones((1, 1) ) UpperCAmelCase_ = model(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow def lowerCAmelCase__ ( self : str ) ->int: UpperCAmelCase_ = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' ) UpperCAmelCase_ = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' ) UpperCAmelCase_ = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ''', ] UpperCAmelCase_ = [ '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''', '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''', ] UpperCAmelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' , truncation=SCREAMING_SNAKE_CASE_ , max_length=512 , padding=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ = model.generate(**SCREAMING_SNAKE_CASE_ , num_beams=2 ).sequences UpperCAmelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) assert tgt_text == decoded
719
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def freeze_params( module ): '''simple docstring''' for param in module.parameters(): param.requires_grad = False def get_device(): '''simple docstring''' device = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): device = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def show_image( image ): '''simple docstring''' fig = plt.imshow(image ) fig.axes.get_xaxis().set_visible(False ) fig.axes.get_yaxis().set_visible(False ) plt.show() def get_timestamp(): '''simple docstring''' current_time = datetime.now() timestamp = current_time.strftime('''%H:%M:%S''' ) return timestamp
43
0
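# Hedged usage sketch (not part of the dataset) for the helpers above; the
# function names are reconstructions (the mangled cell leaves them unnamed),
# but the bodies pin the behavior. Freezing flips `requires_grad` off so an
# optimizer would skip those parameters.
import torch

model = torch.nn.Linear(4, 4)
freeze_params(model)  # assumes the cell above is in scope
assert all(not p.requires_grad for p in model.parameters())
print(get_timestamp())  # e.g. "14:03:59"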
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowercase__ : Optional[Any] = logging.get_logger(__name__) lowercase__ : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } lowercase__ : List[str] = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Any ): '''simple docstring''' for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models UpperCAmelCase_ = "lm_head" UpperCAmelCase_ = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: UpperCAmelCase_ = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: UpperCAmelCase_ = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase_ = value elif weight_type == "weight_g": UpperCAmelCase_ = value elif weight_type == "weight_v": UpperCAmelCase_ = value elif weight_type == "bias": UpperCAmelCase_ = value else: UpperCAmelCase_ = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any ): '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = fairseq_model.state_dict() UpperCAmelCase_ = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase_ = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase_ = "unispeech." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: UpperCAmelCase_ = True if "*" in mapped_key: UpperCAmelCase_ = name.split(_UpperCamelCase )[0].split('''.''' )[-2] UpperCAmelCase_ = mapped_key.replace('''*''' , _UpperCamelCase ) if "weight_g" in name: UpperCAmelCase_ = "weight_g" elif "weight_v" in name: UpperCAmelCase_ = "weight_v" elif "bias" in name: UpperCAmelCase_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase_ = "weight" else: UpperCAmelCase_ = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : str ): '''simple docstring''' UpperCAmelCase_ = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase_ = name.split('''.''' ) UpperCAmelCase_ = int(items[0] ) UpperCAmelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) UpperCAmelCase_ = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str=True ): '''simple docstring''' if config_path is not None: UpperCAmelCase_ = UniSpeechConfig.from_pretrained(_UpperCamelCase ) else: UpperCAmelCase_ = UniSpeechConfig() if is_finetuned: if dict_path: UpperCAmelCase_ = Dictionary.load_from_json(_UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase_ = target_dict.pad_index UpperCAmelCase_ = target_dict.bos_index UpperCAmelCase_ = target_dict.eos_index UpperCAmelCase_ = len(target_dict.symbols ) UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''vocab.json''' ) if not os.path.isdir(_UpperCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_UpperCamelCase ) ) return os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) UpperCAmelCase_ = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase_ = 42 UpperCAmelCase_ = 43 with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = WavaVecaPhonemeCTCTokenizer( _UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_UpperCamelCase , ) UpperCAmelCase_ = True if config.feat_extract_norm == "layer" else False UpperCAmelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ) UpperCAmelCase_ = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ = UniSpeechForCTC(_UpperCamelCase ) else: UpperCAmelCase_ = UniSpeechForPreTraining(_UpperCamelCase ) if is_finetuned: UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) UpperCAmelCase_ = model[0].eval() recursively_load_weights(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) hf_unispeech.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowercase__ : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( 
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) lowercase__ : Optional[int] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
720
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Any ) ->Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self : str ) ->List[str]: UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('''sample_euler''' ) UpperCAmelCase_ = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase__ ( self : List[str] ) ->int: UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('''sample_euler''' ) UpperCAmelCase_ = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]: UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) UpperCAmelCase_ = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCAmelCase__ , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array( [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
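# Hedged usage sketch mirroring the slow tests above: load the k-diffusion
# pipeline, select a sampler by name, and sample deterministically from a seeded
# generator. Assumes a CUDA GPU and the k-diffusion extra are installed.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
pipe.set_scheduler("sample_euler")
image = pipe(
    ["A painting of a squirrel eating a burger"],
    generator=torch.manual_seed(0),
    guidance_scale=9.0,
    num_inference_steps=20,
    output_type="np",
).images[0]  # numpy array of shape (512, 512, 3)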
43
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : Tuple = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class lowerCamelCase ( lowercase__ ): '''simple docstring''' lowerCAmelCase__ = '''ibert''' def __init__( self : int , UpperCAmelCase__ : Any=3_0522 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : List[Any]=3072 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : str=1e-12 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Union[str, Any]="absolute" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : str="none" , **UpperCAmelCase__ : int , ) ->Optional[Any]: super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = position_embedding_type UpperCAmelCase_ = quant_mode UpperCAmelCase_ = force_dequant class lowerCamelCase ( lowercase__ ): '''simple docstring''' @property def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]: if self.task == "multiple-choice": UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
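# Hedged, self-contained sketch of the ONNX input-axes rule implemented above:
# multiple-choice tasks insert a "choice" axis between batch and sequence, all
# other tasks use (batch, sequence). `onnx_inputs` is an illustrative name.
from collections import OrderedDict

def onnx_inputs(task):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

assert list(onnx_inputs("default")) == ["input_ids", "attention_mask"]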
721
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = MvpTokenizer lowerCAmelCase__ = MvpTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = filter_roberta_detectors def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: super().setUp() UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase_ = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCAmelCase__ ( self : Tuple , **UpperCAmelCase__ : List[str] ) ->Dict: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : int ) ->Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]: return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' ) @cached_property def lowerCAmelCase__ ( self : Tuple ) ->Tuple: return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' ) @require_torch def lowerCAmelCase__ ( self : Any ) ->Dict: UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCAmelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # Test that special tokens are reset @require_torch def lowerCAmelCase__ ( self : str ) ->int: UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' ) # check if 
input_ids are returned and no labels self.assertIn('''input_ids''' , UpperCAmelCase__ ) self.assertIn('''attention_mask''' , UpperCAmelCase__ ) self.assertNotIn('''labels''' , UpperCAmelCase__ ) self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ ) @require_torch def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]: UpperCAmelCase_ = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def lowerCAmelCase__ ( self : List[str] ) ->int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def lowerCAmelCase__ ( self : Dict ) ->Optional[int]: UpperCAmelCase_ = ['''A long paragraph for summarization.'''] UpperCAmelCase_ = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , text_target=UpperCAmelCase__ , return_tensors='''pt''' ) UpperCAmelCase_ = inputs['''input_ids'''] UpperCAmelCase_ = inputs['''labels'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def lowerCAmelCase__ ( self : str ) ->Optional[Any]: pass def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.''' UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', 
'''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
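# Hedged usage sketch of the seq2seq tokenization pattern exercised by the tests
# above: passing `text_target=` tokenizes the source and the labels in one call,
# and both sequences get <s> ... </s> wrapping. Requires `transformers` with MVP.
from transformers import MvpTokenizer

tok = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
batch = tok(
    ["A long paragraph for summarization."],
    text_target=["Summary of the text."],
    return_tensors="pt",
)
print(batch["input_ids"][0, 0].item() == tok.bos_token_id)  # True
print("labels" in batch)  # True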
43
0
'''simple docstring''' import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase__ : Dict = False lowercase__ : Optional[Any] = logging.get_logger(__name__) lowercase__ : Union[str, Any] = "ybelkada/fonts" def __lowerCamelCase ( ): '''simple docstring''' if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """ '''Pix2StructImageProcessor. Please upgrade torch.''' ) def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict ): '''simple docstring''' requires_backends(_lowerCAmelCase , ['''torch'''] ) _check_torch_version() UpperCAmelCase_ = image_tensor.unsqueeze(0 ) UpperCAmelCase_ = torch.nn.functional.unfold(_lowerCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) UpperCAmelCase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , _lowerCAmelCase , _lowerCAmelCase , -1 ) UpperCAmelCase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Tuple = 36 , _UpperCamelCase : List[str] = "black" , _UpperCamelCase : Union[str, Any] = "white" , _UpperCamelCase : List[Any] = 5 , _UpperCamelCase : int = 5 , _UpperCamelCase : List[Any] = 5 , _UpperCamelCase : int = 5 , _UpperCamelCase : int = None , _UpperCamelCase : Optional[Any] = None , ): '''simple docstring''' requires_backends(_lowerCAmelCase , '''vision''' ) # Add new lines so that each line is no more than 80 characters. UpperCAmelCase_ = textwrap.TextWrapper(width=80 ) UpperCAmelCase_ = wrapper.wrap(text=_lowerCAmelCase ) UpperCAmelCase_ = '''\n'''.join(_lowerCAmelCase ) if font_bytes is not None and font_path is None: UpperCAmelCase_ = io.BytesIO(_lowerCAmelCase ) elif font_path is not None: UpperCAmelCase_ = font_path else: UpperCAmelCase_ = hf_hub_download(_lowerCAmelCase , '''Arial.TTF''' ) UpperCAmelCase_ = ImageFont.truetype(_lowerCAmelCase , encoding='''UTF-8''' , size=_lowerCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. UpperCAmelCase_ = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , _lowerCAmelCase ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = temp_draw.textbbox((0, 0) , _lowerCAmelCase , _lowerCAmelCase ) # Create the actual image with a bit of padding around the text. 
UpperCAmelCase_ = text_width + left_padding + right_padding UpperCAmelCase_ = text_height + top_padding + bottom_padding UpperCAmelCase_ = Image.new('''RGB''' , (image_width, image_height) , _lowerCAmelCase ) UpperCAmelCase_ = ImageDraw.Draw(_lowerCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=_lowerCAmelCase , fill=_lowerCAmelCase , font=_lowerCAmelCase ) return image def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : List[Any] , **_UpperCamelCase : int ): '''simple docstring''' requires_backends(_lowerCAmelCase , '''vision''' ) # Convert to PIL image if necessary UpperCAmelCase_ = to_pil_image(_lowerCAmelCase ) UpperCAmelCase_ = render_text(_lowerCAmelCase , **_lowerCAmelCase ) UpperCAmelCase_ = max(header_image.width , image.width ) UpperCAmelCase_ = int(image.height * (new_width / image.width) ) UpperCAmelCase_ = int(header_image.height * (new_width / header_image.width) ) UpperCAmelCase_ = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary UpperCAmelCase_ = to_numpy_array(_lowerCAmelCase ) if infer_channel_dimension_format(_lowerCAmelCase ) == ChannelDimension.LAST: UpperCAmelCase_ = to_channel_dimension_format(_lowerCAmelCase , ChannelDimension.LAST ) return new_image class lowerCamelCase ( __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ = ["flattened_patches"] def __init__( self : str , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 2048 , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : int , ) ->Any: super().__init__(**_lowerCamelCase ) UpperCAmelCase_ = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} UpperCAmelCase_ = do_normalize UpperCAmelCase_ = do_convert_rgb UpperCAmelCase_ = max_patches UpperCAmelCase_ = is_vqa def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : dict , **UpperCAmelCase__ : Union[str, Any] ) ->Any: requires_backends(self.extract_flattened_patches , '''torch''' ) _check_torch_version() # convert to torch UpperCAmelCase_ = to_channel_dimension_format(_lowerCamelCase , ChannelDimension.FIRST ) UpperCAmelCase_ = torch.from_numpy(_lowerCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ = patch_size['''height'''], patch_size['''width'''] UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(_lowerCamelCase ) # maximize scale s.t. 
UpperCAmelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) UpperCAmelCase_ = max(min(math.floor(scale * image_height / patch_height ) , _lowerCamelCase ) , 1 ) UpperCAmelCase_ = max(min(math.floor(scale * image_width / patch_width ) , _lowerCamelCase ) , 1 ) UpperCAmelCase_ = max(num_feasible_rows * patch_height , 1 ) UpperCAmelCase_ = max(num_feasible_cols * patch_width , 1 ) UpperCAmelCase_ = torch.nn.functional.interpolate( image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=_lowerCamelCase , antialias=_lowerCamelCase , ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] UpperCAmelCase_ = torch_extract_patches(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) UpperCAmelCase_ = patches.shape UpperCAmelCase_ = patches_shape[1] UpperCAmelCase_ = patches_shape[2] UpperCAmelCase_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] UpperCAmelCase_ = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] UpperCAmelCase_ = torch.arange(_lowerCamelCase ).reshape([rows, 1] ).repeat(1 , _lowerCamelCase ).reshape([rows * columns, 1] ) UpperCAmelCase_ = torch.arange(_lowerCamelCase ).reshape([1, columns] ).repeat(_lowerCamelCase , 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] UpperCAmelCase_ = row_ids.to(torch.floataa ) UpperCAmelCase_ = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] UpperCAmelCase_ = torch.cat([row_ids, col_ids, patches] , -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] UpperCAmelCase_ = torch.nn.functional.pad(_lowerCamelCase , [0, 0, 0, max_patches - (rows * columns)] ).float() UpperCAmelCase_ = to_numpy_array(_lowerCamelCase ) return result def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : str ) ->Optional[int]: if image.dtype == np.uinta: UpperCAmelCase_ = image.astype(np.floataa ) # take mean across the whole `image` UpperCAmelCase_ = np.mean(_lowerCamelCase ) UpperCAmelCase_ = np.std(_lowerCamelCase ) UpperCAmelCase_ = max(_lowerCamelCase , 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , **_lowerCamelCase ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Dict[str, int]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) ->Union[str, Any]: UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase_ = patch_size if patch_size is not None else self.patch_size UpperCAmelCase_ = max_patches if max_patches is not None else self.max_patches UpperCAmelCase_ = self.is_vqa if kwargs.get('''data_format''' , _lowerCamelCase ) is not None: raise ValueError('''data_format is not an accepted input as the outputs are ''' ) UpperCAmelCase_ = make_list_of_images(_lowerCamelCase ) if not 
valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase_ = [convert_to_rgb(_lowerCamelCase ) for image in images] # All transformations expect numpy arrays. UpperCAmelCase_ = [to_numpy_array(_lowerCamelCase ) for image in images] if is_vqa: if header_text is None: raise ValueError('''A header text must be provided for VQA models.''' ) UpperCAmelCase_ = kwargs.pop('''font_bytes''' , _lowerCamelCase ) UpperCAmelCase_ = kwargs.pop('''font_path''' , _lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCAmelCase_ = [header_text] * len(_lowerCamelCase ) UpperCAmelCase_ = [ render_header(_lowerCamelCase , header_text[i] , font_bytes=_lowerCamelCase , font_path=_lowerCamelCase ) for i, image in enumerate(_lowerCamelCase ) ] if do_normalize: UpperCAmelCase_ = [self.normalize(image=_lowerCamelCase ) for image in images] # convert to torch tensor and permute UpperCAmelCase_ = [ self.extract_flattened_patches(image=_lowerCamelCase , max_patches=_lowerCamelCase , patch_size=_lowerCamelCase ) for image in images ] # create attention mask in numpy UpperCAmelCase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] UpperCAmelCase_ = BatchFeature( data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=_lowerCamelCase ) return encoded_outputs
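# Hedged, self-contained sketch of the unfold-based patch extraction performed by
# `torch_extract_patches` above: a (C, H, W) image becomes a matrix of
# (rows * cols) flattened patches of size patch_h * patch_w * C.
import torch

def extract_patches(image, ph, pw):
    c, h, w = image.shape
    patches = torch.nn.functional.unfold(image.unsqueeze(0), (ph, pw), stride=(ph, pw))
    patches = patches.reshape(1, c, ph, pw, -1)
    patches = patches.permute(0, 4, 2, 3, 1)  # (1, rows*cols, ph, pw, c)
    return patches.reshape(-1, c * ph * pw)

out = extract_patches(torch.randn(3, 32, 48), 16, 16)
assert out.shape == (2 * 3, 16 * 16 * 3)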
700
'''simple docstring''' def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : list[str] ): '''simple docstring''' UpperCAmelCase_ = '''''' for word_or_phrase in separated: if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(_UpperCamelCase ) if __name__ == "__main__": from doctest import testmod testmod()
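# Hedged, self-contained restatement of the joiner above (the original names were
# rewritten by the style transform): append the separator after every word, then
# strip the trailing one; non-strings raise.
def join_words(separator, separated):
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)

assert join_words("-", ["a", "b", "c"]) == "a-b-c"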
43
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor lowercase__ : List[str] = logging.get_logger(__name__) class lowerCamelCase ( lowerCamelCase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Union[str, Any] ) ->None: warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
701
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase__ : Optional[int] = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, Iterable[int]] , _UpperCamelCase : bool , _UpperCamelCase : int ): '''simple docstring''' def constraint_to_multiple_of(_UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : str=None ): UpperCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: UpperCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: UpperCAmelCase_ = math.ceil(val / multiple ) * multiple return x UpperCAmelCase_ = (output_size, output_size) if isinstance(_UpperCamelCase , _UpperCamelCase ) else output_size UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ = output_size # determine new height and width UpperCAmelCase_ = output_height / input_height UpperCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width UpperCAmelCase_ = scale_width else: # fit height UpperCAmelCase_ = scale_height UpperCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=_UpperCamelCase ) UpperCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=_UpperCamelCase ) return (new_height, new_width) class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = ['''pixel_values'''] def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : str , ) ->None: super().__init__(**UpperCAmelCase__ ) UpperCAmelCase_ = size if size is not None else {'''height''': 384, '''width''': 384} UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = keep_aspect_ratio UpperCAmelCase_ = ensure_multiple_of UpperCAmelCase_ = resample UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, 
ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray: UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) UpperCAmelCase_ = get_resize_output_image_size( UpperCAmelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , ) return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Any: return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray: return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->PIL.Image.Image: UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) UpperCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio UpperCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ = [to_numpy_array(UpperCAmelCase__ ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] UpperCAmelCase_ = {'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) ->Optional[Any]: UpperCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(UpperCAmelCase__ ): UpperCAmelCase_ = target_sizes.numpy() UpperCAmelCase_ = [] for idx in range(len(UpperCAmelCase__ ) ): UpperCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__ ) UpperCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase__ ) else: UpperCAmelCase_ = logits.argmax(dim=1 ) UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
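# Hedged sketch of the `constraint_to_multiple_of` rounding used by the resizer
# above: snap to the nearest multiple, floor if that overshoots max_val, ceil if
# it undershoots min_val (DPT typically uses ensure_multiple_of=32).
import math

def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

assert constrain_to_multiple_of(383.5, 32) == 384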
43
0
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' UpperCAmelCase_ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) UpperCAmelCase_ = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ), ] ) UpperCAmelCase_ = transform(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ ) return image def __lowerCamelCase ( _UpperCamelCase : Tuple ): '''simple docstring''' if "visual_encoder" in key: UpperCAmelCase_ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , UpperCamelCase__ ) if "blocks" in key: UpperCAmelCase_ = re.sub(R'''blocks''' , '''layers''' , UpperCamelCase__ ) if "attn" in key: UpperCAmelCase_ = re.sub(R'''attn''' , '''self_attn''' , UpperCamelCase__ ) if "norm1" in key: UpperCAmelCase_ = re.sub(R'''norm1''' , '''layer_norm1''' , UpperCamelCase__ ) if "norm2" in key: UpperCAmelCase_ = re.sub(R'''norm2''' , '''layer_norm2''' , UpperCamelCase__ ) if "encoder.norm" in key: UpperCAmelCase_ = re.sub(R'''encoder.norm''' , '''post_layernorm''' , UpperCamelCase__ ) if "encoder.patch_embed.proj" in key: UpperCAmelCase_ = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , UpperCamelCase__ ) if "encoder.pos_embed" in key: UpperCAmelCase_ = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , UpperCamelCase__ ) if "encoder.cls_token" in key: UpperCAmelCase_ = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , UpperCamelCase__ ) if "self_attn" in key: UpperCAmelCase_ = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , UpperCamelCase__ ) return key @torch.no_grad() def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any]=None ): '''simple docstring''' if config_path is not None: UpperCAmelCase_ = BlipConfig.from_pretrained(UpperCamelCase__ ) else: UpperCAmelCase_ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) UpperCAmelCase_ = BlipForConditionalGeneration(UpperCamelCase__ ).eval() UpperCAmelCase_ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' UpperCAmelCase_ = blip_decoder(pretrained=UpperCamelCase__ , image_size=384 , vit='''base''' ) UpperCAmelCase_ = pt_model.eval() UpperCAmelCase_ = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(UpperCamelCase__ ) UpperCAmelCase_ = rename_key(UpperCamelCase__ ) UpperCAmelCase_ = value hf_model.load_state_dict(UpperCamelCase__ ) UpperCAmelCase_ = 384 UpperCAmelCase_ = load_demo_image(image_size=UpperCamelCase__ , device='''cpu''' ) UpperCAmelCase_ = BertTokenizer.from_pretrained('''bert-base-uncased''' ) UpperCAmelCase_ = tokenizer(['''a picture of'''] 
).input_ids UpperCAmelCase_ = hf_model.generate(UpperCamelCase__ , UpperCamelCase__ ) assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] UpperCAmelCase_ = hf_model.generate(UpperCamelCase__ ) assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(UpperCamelCase__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase_ = ( '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth''' ) UpperCAmelCase_ = blip_vqa(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit='''base''' ) vqa_model.eval() UpperCAmelCase_ = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(UpperCamelCase__ ) UpperCAmelCase_ = rename_key(UpperCamelCase__ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForQuestionAnswering(UpperCamelCase__ ) hf_vqa_model.load_state_dict(UpperCamelCase__ ) UpperCAmelCase_ = ['''How many dogs are in this image?'''] UpperCAmelCase_ = tokenizer(UpperCamelCase__ , return_tensors='''pt''' ).input_ids UpperCAmelCase_ = hf_vqa_model.generate(UpperCamelCase__ , UpperCamelCase__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' ) UpperCAmelCase_ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth''' UpperCAmelCase_ = blip_itm(pretrained=UpperCamelCase__ , image_size=UpperCamelCase__ , vit='''base''' ) itm_model.eval() UpperCAmelCase_ = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ = modified_state_dict.pop(UpperCamelCase__ ) UpperCAmelCase_ = rename_key(UpperCamelCase__ ) UpperCAmelCase_ = value UpperCAmelCase_ = BlipForImageTextRetrieval(UpperCamelCase__ ) UpperCAmelCase_ = ['''A picture of a woman with a dog sitting in a beach'''] UpperCAmelCase_ = tokenizer( UpperCamelCase__ , return_tensors='''pt''' , padding='''max_length''' , truncation=UpperCamelCase__ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(UpperCamelCase__ ) hf_itm_model.eval() UpperCAmelCase_ = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ ) UpperCAmelCase_ = hf_itm_model(UpperCamelCase__ , UpperCamelCase__ , use_itm_head=UpperCamelCase__ ) assert out[0].item() == 0.2_110_687_494_277_954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' ) if __name__ == "__main__": lowercase__ : List[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") lowercase__ : Tuple = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
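# Hedged, self-contained sketch of the sequential regex renaming used by the BLIP
# converter above; TOY_RULES is an illustrative subset of the script's rules.
import re

TOY_RULES = [
    (r"visual_encoder", "vision_model.encoder"),
    (r"blocks", "layers"),
    (r"attn", "self_attn"),
]

def toy_rename_key(key):
    for pattern, replacement in TOY_RULES:
        key = re.sub(pattern, replacement, key)
    return key

assert toy_rename_key("visual_encoder.blocks.0.attn.qkv") == "vision_model.encoder.layers.0.self_attn.qkv"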
702
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : int = logging.get_logger(__name__) lowercase__ : List[str] = { "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''time_series_transformer''' lowerCAmelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]: # time series specific configuration UpperCAmelCase_ = prediction_length UpperCAmelCase_ = context_length or prediction_length UpperCAmelCase_ = distribution_output UpperCAmelCase_ = loss UpperCAmelCase_ = input_size UpperCAmelCase_ = num_time_features UpperCAmelCase_ = lags_sequence UpperCAmelCase_ = scaling UpperCAmelCase_ = num_dynamic_real_features UpperCAmelCase_ = num_static_real_features UpperCAmelCase_ = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) UpperCAmelCase_ = cardinality else: UpperCAmelCase_ = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) UpperCAmelCase_ = embedding_dimension else: UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] UpperCAmelCase_ = num_parallel_samples # Transformer architecture configuration UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features UpperCAmelCase_ = d_model UpperCAmelCase_ = encoder_attention_heads UpperCAmelCase_ = decoder_attention_heads UpperCAmelCase_ = encoder_ffn_dim UpperCAmelCase_ = decoder_ffn_dim UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = decoder_layers UpperCAmelCase_ = dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = encoder_layerdrop 
UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = activation_function UpperCAmelCase_ = init_std UpperCAmelCase_ = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ ) @property def lowerCAmelCase__ ( self : List[str] ) ->int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
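# Hedged sketch of the `_number_of_features` accounting at the end of the config
# above: the transformer's input width is input_size * len(lags_sequence) plus
# this feature count, which includes the two scaling features log1p(abs(loc)) and
# log(scale). `number_of_features` is an illustrative standalone name.
def number_of_features(embedding_dims, num_dynamic_real, num_time, num_static_real, input_size):
    return sum(embedding_dims) + num_dynamic_real + num_time + num_static_real + input_size * 2

# With the defaults above (no categorical/real extras, input_size=1) this is 2.
assert number_of_features([0], 0, 0, 0, 1) == 2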
43
0
'''simple docstring''' def __lowerCamelCase ( _UpperCamelCase : Optional[int] = 1000 ): '''simple docstring''' UpperCAmelCase_ = 2**power UpperCAmelCase_ = 0 while n: UpperCAmelCase_ , UpperCAmelCase_ = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
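# Hedged, self-contained restatement of the digit-sum loop above (names were
# rewritten by the style transform): sum the decimal digits of 2**power.
# Sanity check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26; Project Euler 16 asks
# for power=1000.
def digit_sum_of_power(power=1000):
    n, r = 2**power, 0
    while n:
        r, n = r + n % 10, n // 10
    return r

assert digit_sum_of_power(15) == 26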
703
'''simple docstring''' from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowercase__ : Dict = logging.get_logger(__name__) lowercase__ : List[Any] = "T5Config" class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''mt5''' lowerCAmelCase__ = MTaConfig class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''mt5''' lowerCAmelCase__ = MTaConfig class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''mt5''' lowerCAmelCase__ = MTaConfig
43
0
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @property def lowerCAmelCase__ ( self : Any ) ->Tuple: torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]: UpperCAmelCase_ = self.dummy_uncond_unet UpperCAmelCase_ = ScoreSdeVeScheduler() UpperCAmelCase_ = ScoreSdeVePipeline(unet=A_ , scheduler=A_ ) sde_ve.to(A_ ) sde_ve.set_progress_bar_config(disable=A_ ) UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=A_ ).images UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=A_ , return_dict=A_ )[ 0 ] UpperCAmelCase_ = image[0, -3:, -3:, -1] UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict: UpperCAmelCase_ = '''google/ncsnpp-church-256''' UpperCAmelCase_ = UNetaDModel.from_pretrained(A_ ) UpperCAmelCase_ = ScoreSdeVeScheduler.from_pretrained(A_ ) UpperCAmelCase_ = ScoreSdeVePipeline(unet=A_ , scheduler=A_ ) sde_ve.to(A_ ) sde_ve.set_progress_bar_config(disable=A_ ) UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=A_ ).images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) UpperCAmelCase_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
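# Hedged usage sketch matching the slow test above: the VE-SDE pipeline is
# assembled from a UNet and its matching scheduler, then sampled with a seeded
# generator (downloads the google/ncsnpp-church-256 weights).
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel

model_id = "google/ncsnpp-church-256"
sde_ve = ScoreSdeVePipeline(
    unet=UNet2DModel.from_pretrained(model_id),
    scheduler=ScoreSdeVeScheduler.from_pretrained(model_id),
)
images = sde_ve(num_inference_steps=10, output_type="numpy", generator=torch.manual_seed(0)).images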
704
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowercase__ : str = datasets.logging.get_logger(__name__) lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase__ ( self : List[Any] ) ->Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence''' ), '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Union[str, Any] ) ->Any: if self.config_name == "default": UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) ) else: UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=False ) ->Optional[Any]: if gpus is None: UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0 UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references} UpperCAmelCase_ = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for t in zip(*data.values() )] UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(UpperCAmelCase__ , gpus=UpperCAmelCase__ , progress_bar=UpperCAmelCase__ ) return {"mean_score": mean_score, "scores": scores}
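# Hedged sketch of the columns-to-rows transposition performed in `_compute`
# above: a dict of parallel lists becomes one dict per sample for the COMET
# scorer.
data = {"src": ["s1", "s2"], "mt": ["m1", "m2"], "ref": ["r1", "r2"]}
samples = [dict(zip(data, t)) for t in zip(*data.values())]
assert samples[0] == {"src": "s1", "mt": "m1", "ref": "r1"}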
43
0
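# A minimal de-obfuscated sketch of the _compute step above, for readability.
# The names are ours, the comet API calls are the ones used in the snippet, and
# the (scores, mean_score) unpack order is an assumption taken from the upstream metric.
import torch

def compute_comet(scorer, sources, predictions, references, gpus=None, progress_bar=False):
    if gpus is None:
        gpus = 1 if torch.cuda.is_available() else 0  # default to one GPU when available
    data = {"src": sources, "mt": predictions, "ref": references}
    # turn the three parallel lists into one {"src", "mt", "ref"} dict per sample
    samples = [dict(zip(data, t)) for t in zip(*data.values())]
    scores, mean_score = scorer.predict(samples, gpus=gpus, progress_bar=progress_bar)
    return {"mean_score": mean_score, "scores": scores}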
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowercase__ : Tuple = logging.getLogger(__name__) torch.set_grad_enabled(False) lowercase__ : int = 'cuda' if torch.cuda.is_available() else 'cpu' def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any]=100 , _UpperCamelCase : Union[str, Any]=" " ): '''simple docstring''' UpperCAmelCase_ = text.split(_lowerCamelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )] def __lowerCamelCase ( _UpperCamelCase : dict ): '''simple docstring''' UpperCAmelCase_ = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(_lowerCamelCase ): titles.append(title if title is not None else '''''' ) texts.append(_lowerCamelCase ) return {"title": titles, "text": texts} def __lowerCamelCase ( _UpperCamelCase : dict , _UpperCamelCase : DPRContextEncoder , _UpperCamelCase : DPRContextEncoderTokenizerFast ): '''simple docstring''' UpperCAmelCase_ = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=_lowerCamelCase , padding='''longest''' , return_tensors='''pt''' )["input_ids"] UpperCAmelCase_ = ctx_encoder(input_ids.to(device=_lowerCamelCase ) , return_dict=_lowerCamelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __lowerCamelCase ( _UpperCamelCase : "RagExampleArguments" , _UpperCamelCase : "ProcessingArguments" , _UpperCamelCase : "IndexHnswArguments" , ): '''simple docstring''' logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way UpperCAmelCase_ = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words UpperCAmelCase_ = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc ) # And compute the embeddings UpperCAmelCase_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_lowerCamelCase ) UpperCAmelCase_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) UpperCAmelCase_ = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space UpperCAmelCase_ = dataset.map( partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase ) , batched=_lowerCamelCase , 
batch_size=processing_args.batch_size , features=_lowerCamelCase , ) # And finally save your dataset UpperCAmelCase_ = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(_lowerCamelCase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search UpperCAmelCase_ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=_lowerCamelCase ) # And save the index UpperCAmelCase_ = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(_lowerCamelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class lowerCamelCase : '''simple docstring''' lowerCAmelCase__ = field( default=str(Path(__lowercase ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , ) lowerCAmelCase__ = field( default=__lowercase , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , ) lowerCAmelCase__ = field( default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , ) lowerCAmelCase__ = field( default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={ '''help''': ( '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or''' ''' \'facebook/dpr-ctx_encoder-multiset-base\'''' ) } , ) lowerCAmelCase__ = field( default=str(Path(__lowercase ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , ) @dataclass class lowerCamelCase : '''simple docstring''' lowerCAmelCase__ = field( default=__lowercase , metadata={ '''help''': '''The number of processes to use to split the documents into passages. Default is single process.''' } , ) lowerCAmelCase__ = field( default=16 , metadata={ '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.''' } , ) @dataclass class lowerCamelCase : '''simple docstring''' lowerCAmelCase__ = field( default=7_68 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , ) lowerCAmelCase__ = field( default=1_28 , metadata={ '''help''': ( '''The number of bi-directional links created for every new element during the HNSW index construction.''' ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowercase__ : List[Any] = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowercase__ : Tuple = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowercase__ : Any = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
705
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Union[str, Any]=400 , UpperCAmelCase__ : List[Any]=3 , ) ->Dict: UpperCAmelCase_ = parent UpperCAmelCase_ = do_resize UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288} UpperCAmelCase_ = size_divisor UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = do_center_crop UpperCAmelCase_ = image_mean UpperCAmelCase_ = image_std UpperCAmelCase_ = do_pad UpperCAmelCase_ = batch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = min_resolution UpperCAmelCase_ = max_resolution def lowerCAmelCase__ ( self : Tuple ) ->List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=False ) ->Any: if not batched: UpperCAmelCase_ = self.size['''shortest_edge'''] UpperCAmelCase_ = image_inputs[0] if isinstance(UpperCAmelCase__ , Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ = image.size else: UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2] UpperCAmelCase_ = size / min(UpperCAmelCase__ , UpperCAmelCase__ ) if h < w: UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w else: UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size UpperCAmelCase_ = int((1333 / 800) * size ) if max(UpperCAmelCase__ , UpperCAmelCase__ ) > max_size: UpperCAmelCase_ = max_size / max(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = newh * scale UpperCAmelCase_ = neww * scale UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 ) UpperCAmelCase_ , UpperCAmelCase_ = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: UpperCAmelCase_ = [] for image in image_inputs: UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0] UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = BridgeTowerImageProcessor if 
is_vision_available() else None def lowerCAmelCase__ ( self : Optional[int] ) ->str: UpperCAmelCase_ = BridgeTowerImageProcessingTester(self ) @property def lowerCAmelCase__ ( self : List[str] ) ->Dict: return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self : Optional[int] ) ->int: UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''size_divisor''' ) ) def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]: pass def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]: # Initialize image processor UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self : Any ) ->Optional[int]: # Initialize image processor UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self : int ) ->List[str]: # Initialize image processor UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input 
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
43
0
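# The commented-out reload hints in the snippet above, expanded into a minimal
# sketch. The paths mirror the save calls; the output directory and the query
# embedding are hypothetical placeholders, not part of the original script.
import os

from datasets import load_from_disk

output_dir = "test_run/dummy-kb"  # assumption: the rag_example_args.output_dir used above
dataset = load_from_disk(os.path.join(output_dir, "my_knowledge_dataset"))  # passages + embeddings
dataset.load_faiss_index("embeddings", os.path.join(output_dir, "my_knowledge_dataset_hnsw_index.faiss"))
# Nearest-neighbor lookup over the DPR embeddings; `question_embedding` would come
# from a DPRQuestionEncoder and is left as a placeholder here:
# scores, passages = dataset.get_nearest_examples("embeddings", question_embedding, k=5)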
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor lowercase__ : Union[str, Any] = logging.get_logger(__name__) class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' def __init__( self : Optional[int] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : str ) ->None: warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
706
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Dict ) ->List[str]: UpperCAmelCase_ = tf.convert_to_tensor( [ [ 8.222_0991, # 3rd highest value; idx. 0 -0.562_0044, 5.2322_9752, 4.038_6393, -6.879_8378, -0.5478_5802, -3.201_2153, 2.9277_7176, 1.8817_1953, 7.3534_1276, # 5th highest value; idx. 9 8.4320_7833, # 2nd highest value; idx. 10 -9.8571_1836, -5.9620_9236, -1.1303_9161, -7.111_5294, -0.836_9633, -5.318_6408, 7.0642_7407, 0.8136_9344, -0.8202_3817, -5.917_9796, 0.5881_3443, -6.9977_8438, 4.7155_1189, -0.1877_1637, 7.4402_0759, # 4th highest value; idx. 25 9.3845_0987, # 1st highest value; idx. 26 2.1266_2941, -9.3256_2038, 2.3565_2522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5842_5518, 4.5313_9238, -5.5751_0464, -6.2803_0699, -7.1952_9503, -4.0212_2551, 1.3933_7037, -6.0670_7057, 1.5948_0517, -9.64_3119, 0.0390_7799, 0.6723_1762, -8.8820_6726, 6.2711_5922, # 4th highest value; idx. 13 2.2852_0723, 4.8276_7506, 4.3042_1368, 8.827_5313, # 2nd highest value; idx. 17 5.4402_9958, # 5th highest value; idx. 18 -4.473_5794, 7.3857_9536, # 3rd highest value; idx. 20 -2.9105_1663, 2.6194_6077, -2.567_4762, -9.4895_9302, -4.0292_2645, -1.3541_6918, 9.6770_2323, # 1st highest value; idx. 
27 -5.8947_8553, 1.8537_0467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) UpperCAmelCase_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above UpperCAmelCase_ = tf.convert_to_tensor( [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above UpperCAmelCase_ = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) UpperCAmelCase_ = output[output != -float('''inf''' )] UpperCAmelCase_ = tf.cast( tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @require_tf class lowerCamelCase ( unittest.TestCase , lowerCamelCase ): '''simple docstring''' if is_tf_available(): lowerCAmelCase__ = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]: # TF-only test: tf.saved_model export UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 2 UpperCAmelCase_ = 2 class lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : List[str] ) ->Dict: super(UpperCAmelCase__ , self ).__init__() UpperCAmelCase_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->int: UpperCAmelCase_ = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} UpperCAmelCase_ = [[2, 0], [102, 103]] UpperCAmelCase_ = [[1, 0], [1, 1]] UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} ) UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default'''] for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ): UpperCAmelCase_ = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences'''] UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]: # TF-only test: tf.saved_model export UpperCAmelCase_ = 
TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 1 UpperCAmelCase_ = 2 class lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : int ) ->List[str]: super(UpperCAmelCase__ , self ).__init__() UpperCAmelCase_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->int: UpperCAmelCase_ = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} UpperCAmelCase_ = [[2], [102, 103]] UpperCAmelCase_ = [[1], [1, 1]] UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} ) UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default'''] for input_row in range(len(UpperCAmelCase__ ) ): UpperCAmelCase_ = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences'''] UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow @require_tensorflow_text def lowerCAmelCase__ ( self : Optional[Any] ) ->int: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase__ ) class lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ) ->Any: super().__init__() UpperCAmelCase_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , '''spiece.model''' ) , '''rb''' ).read() ) UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]: UpperCAmelCase_ = self.tokenizer.tokenize(UpperCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = text.pad_model_inputs( UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) UpperCAmelCase_ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) return self.tokenizer.detokenize(UpperCAmelCase__ ) UpperCAmelCase_ = CompleteSentenceTransformer() UpperCAmelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) UpperCAmelCase_ = complete_model(UpperCAmelCase__ ) UpperCAmelCase_ = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ ) keras_model.save(UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple: # Has PT equivalent: this test relies on random sampling UpperCAmelCase_ = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } UpperCAmelCase_ = 14 UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = '''Hello, my dog 
is cute and''' UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ) UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) UpperCAmelCase_ = [638, 198] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: # Has PT equivalent: ample use of framework-specific code UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = '''Hugging Face is a technology company based in New York and Paris.''' UpperCAmelCase_ = bart_tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ).input_ids UpperCAmelCase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy() class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) ->List[str]: return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) ) class lowerCamelCase ( bart_model.model.encoder.__class__ ): '''simple docstring''' def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->Any: return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared ) UpperCAmelCase_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy() with self.assertRaises(UpperCAmelCase__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCAmelCase__ , foo='''bar''' )
43
0
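# The test above exercises tf_top_k_top_p_filtering. As a rough illustration of
# the same idea, here is our own 1-D NumPy sketch (not the transformers
# implementation; min_tokens_to_keep and batching are omitted).
import numpy as np

def top_k_top_p_filter(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    out = logits.astype(float).copy()
    # top-k: mask everything below the k-th largest logit
    top_k = min(top_k, out.size)
    out[out < np.sort(out)[-top_k]] = -np.inf
    # top-p: walk the surviving logits in descending order and mask the tail
    # once the cumulative softmax mass exceeds top_p
    order = np.argsort(out)[::-1]
    probs = np.exp(out[order] - out[order][0])
    probs /= probs.sum()
    to_remove = np.cumsum(probs) > top_p
    to_remove[1:] = to_remove[:-1].copy()  # keep the token that crosses the threshold
    to_remove[0] = False
    out[order[to_remove]] = -np.inf
    return out

print(top_k_top_p_filter(np.array([2.0, 1.0, 0.5, -1.0]), top_k=3, top_p=0.8))
# -> [  2.   1. -inf -inf]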
'''simple docstring''' def __lowerCamelCase ( ): '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowercase__ : Any = generate_large_matrix() lowercase__ : int = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ): '''simple docstring''' assert all(row == sorted(_UpperCamelCase , reverse=_UpperCamelCase ) for row in grid ) assert all(list(_UpperCamelCase ) == sorted(_UpperCamelCase , reverse=_UpperCamelCase ) for col in zip(*_UpperCamelCase ) ) def __lowerCamelCase ( _UpperCamelCase : list[int] ): '''simple docstring''' UpperCAmelCase_ = 0 UpperCAmelCase_ = len(_UpperCamelCase ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: UpperCAmelCase_ = (left + right) // 2 UpperCAmelCase_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: UpperCAmelCase_ = mid + 1 else: UpperCAmelCase_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(_UpperCamelCase ) def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ): '''simple docstring''' UpperCAmelCase_ = 0 UpperCAmelCase_ = len(grid[0] ) for i in range(len(_UpperCamelCase ) ): UpperCAmelCase_ = find_negative_index(grid[i][:bound] ) total += bound return (len(_UpperCamelCase ) * len(grid[0] )) - total def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ): '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ): '''simple docstring''' UpperCAmelCase_ = 0 for row in grid: for i, number in enumerate(_UpperCamelCase ): if number < 0: total += len(_UpperCamelCase ) - i break return total def __lowerCamelCase ( ): '''simple docstring''' from timeit import timeit print('''Running benchmarks''' ) UpperCAmelCase_ = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): UpperCAmelCase_ = timeit(F"""{func}(grid=grid)""" , setup=_UpperCamelCase , number=500 ) print(F"""{func}() took {time:0.4f} seconds""" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
707
'''simple docstring''' from collections.abc import Callable def __lowerCamelCase ( _UpperCamelCase : Callable[[float], float] , _UpperCamelCase : float , _UpperCamelCase : float ): '''simple docstring''' UpperCAmelCase_ = a UpperCAmelCase_ = b if function(_UpperCamelCase ) == 0: # one of a or b is already a root of the function return a elif function(_UpperCamelCase ) == 0: return b elif ( function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0 ): # if neither bound is a root and f(a) and f(b) have the same sign, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: UpperCAmelCase_ = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until the bracket width is below 10^-7 if function(_UpperCamelCase ) == 0: return mid elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0: UpperCAmelCase_ = mid else: UpperCAmelCase_ = mid UpperCAmelCase_ = start + (end - start) / 2.0 return mid def __lowerCamelCase ( _UpperCamelCase : float ): '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
43
0
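# The two snippets above are easier to check with readable names. A
# de-obfuscated sketch of the binary-search negative counter (names are ours;
# the logic mirrors the original and agrees with its brute-force variants):
def find_negative_index(array: list[int]) -> int:
    """Index of the first negative value in a non-increasing row (binary search)."""
    if not array or array[0] < 0:  # empty row, or every value is already negative
        return 0
    left, right = 0, len(array) - 1
    while right + 1 > left:
        mid = (left + right) // 2
        if array[mid] < 0 and array[mid - 1] >= 0:
            return mid
        if array[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(array)  # no negative values in this row

def count_negatives(grid: list[list[int]]) -> int:
    """Count negatives in a grid whose rows and columns are sorted in decreasing order."""
    total = 0
    bound = len(grid[0])  # negatives can only start at or before the previous row's bound
    for row in grid:
        bound = find_negative_index(row[:bound])
        total += bound
    return len(grid) * len(grid[0]) - total

print(count_negatives([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]))  # 8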
'''simple docstring''' from __future__ import annotations def __lowerCamelCase ( _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = 2 UpperCAmelCase_ = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(i ) if n > 1: factors.append(n ) return factors if __name__ == "__main__": import doctest doctest.testmod()
708
'''simple docstring''' import re def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , str_ )] def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' UpperCAmelCase_ = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool , _UpperCamelCase : str ): '''simple docstring''' try: UpperCAmelCase_ = split_input(_UpperCamelCase ) if upper: UpperCAmelCase_ = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: UpperCAmelCase_ = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' return to_simple_case(_UpperCamelCase ) def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' try: UpperCAmelCase_ = to_simple_case(_UpperCamelCase ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ): '''simple docstring''' return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''_''' ) def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ): '''simple docstring''' return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''-''' ) if __name__ == "__main__": __import__("doctest").testmod()
43
0
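# A readable sketch of the trial-division factorizer above (the names are ours,
# the algorithm is the same): divide out each factor i while i * i <= n, then
# whatever remains above 1 is itself prime.
def prime_factors(n: int) -> list[int]:
    i = 2
    factors: list[int] = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # the leftover is a prime factor larger than sqrt of the input
    return factors

assert prime_factors(100) == [2, 2, 5, 5]
assert prime_factors(97) == [97]  # 97 is prime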
import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging lowercase__ : Tuple = logging.get_logger(__name__) def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. UpperCAmelCase_ = json.loads(_UpperCamelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. UpperCAmelCase_ = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". UpperCAmelCase_ = json.loads(_UpperCamelCase ) if not mpi_options.get('''sagemaker_mpi_enabled''' , _UpperCamelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class lowerCamelCase ( a__ ): '''simple docstring''' lowerCAmelCase__ = field( default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , ) def lowerCAmelCase__ ( self : Any ) ->Tuple: super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' , lowerCamelCase_ , ) @cached_property def lowerCAmelCase__ ( self : Optional[Any] ) ->"torch.device": logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: UpperCAmelCase_ = torch.device('''cpu''' ) UpperCAmelCase_ = 0 elif is_sagemaker_model_parallel_available(): UpperCAmelCase_ = smp.local_rank() UpperCAmelCase_ = torch.device('''cuda''' , lowerCamelCase_ ) UpperCAmelCase_ = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta ) UpperCAmelCase_ = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) UpperCAmelCase_ = torch.device('''cuda''' , self.local_rank ) UpperCAmelCase_ = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 UpperCAmelCase_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. UpperCAmelCase_ = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta ) UpperCAmelCase_ = torch.device('''cuda''' , self.local_rank ) UpperCAmelCase_ = 1 if device.type == "cuda": torch.cuda.set_device(lowerCamelCase_ ) return device @property def lowerCAmelCase__ ( self : int ) ->Any: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def lowerCAmelCase__ ( self : Optional[Any] ) ->int: return not is_sagemaker_model_parallel_available() @property def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple: return False
709
'''simple docstring''' import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowercase__ : Optional[Any] = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , ): '''simple docstring''' if attention_mask is None: UpperCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: UpperCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: UpperCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCamelCase : '''simple docstring''' def __init__( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Optional[int]: UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = eos_token_id UpperCAmelCase_ = pad_token_id UpperCAmelCase_ = bos_token_id UpperCAmelCase_ = initializer_range def lowerCAmelCase__ ( self : int ) ->Any: UpperCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) UpperCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 ) UpperCAmelCase_ = 
BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , ) UpperCAmelCase_ = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return config, inputs_dict def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]: UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs() return config, inputs_dict def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) ->Tuple: UpperCAmelCase_ = 20 UpperCAmelCase_ = model_class_name(UpperCAmelCase__ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase_ , UpperCAmelCase_ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) UpperCAmelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->Union[str, Any]: UpperCAmelCase_ = 20 UpperCAmelCase_ = model_class_name(UpperCAmelCase__ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] ) UpperCAmelCase_ , UpperCAmelCase_ = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) UpperCAmelCase_ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * 
[[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) UpperCAmelCase_ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , ) UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ ) UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = 99 def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]: UpperCAmelCase_ = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) UpperCAmelCase_ = input_ids.shape[0] UpperCAmelCase_ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCAmelCase__ ( self : Any ) ->str: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_config_and_data() UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ ) UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ ) UpperCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : str ) ->int: UpperCAmelCase_ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ ) UpperCAmelCase_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) UpperCAmelCase_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ ) UpperCAmelCase_ = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]: UpperCAmelCase_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 ) UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum() UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCamelCase ( lowerCamelCase , unittest.TestCase , lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else 
() def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]: UpperCAmelCase_ = FlaxBlenderbotModelTester(self ) def lowerCAmelCase__ ( self : str ) ->Tuple: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Tuple ) ->str: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Dict ) ->Tuple: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = model_class(UpperCAmelCase__ ) @jax.jit def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] ): return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase__ ( self : str ) ->str: UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = model_class(UpperCAmelCase__ ) UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) UpperCAmelCase_ = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ): return model.decode( decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , ) with self.subTest('''JIT Enabled''' ): UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple() self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) ) for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCAmelCase__ ( self : int ) ->int: for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids UpperCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id UpperCAmelCase_ = model(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]: UpperCAmelCase_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, 
'''max_length''': 25} UpperCAmelCase_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCAmelCase__ ) UpperCAmelCase_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) UpperCAmelCase_ = ['''Sam'''] UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''jax''' ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = '''Sam is a great name. It means "sun" in Gaelic.''' UpperCAmelCase_ = tokenizer.batch_decode(UpperCAmelCase__ , **UpperCAmelCase__ ) assert generated_txt[0].strip() == tgt_text
43
0
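# A rough sketch of the SM_HP_MP_PARAMETERS probe in the SageMaker snippet above
# (the values are hypothetical; in a real training job SageMaker sets this
# environment variable itself):
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2, "microbatches": 4})
try:
    smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
    model_parallel = "partitions" in smp_options  # the key the probe requires
except json.JSONDecodeError:
    model_parallel = False
print(model_parallel)  # True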
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class lowerCamelCase ( yaml.SafeLoader ): '''simple docstring''' def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : List[str] ) ->Tuple: UpperCAmelCase_ = [self.constructed_objects[key_node] for key_node, _ in node.value] UpperCAmelCase_ = [tuple(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else key for key in keys] UpperCAmelCase_ = Counter(UpperCamelCase__ ) UpperCAmelCase_ = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str=False ) ->List[Any]: UpperCAmelCase_ = super().construct_mapping(UpperCamelCase__ , deep=UpperCamelCase__ ) self._check_no_duplicates_on_constructed_node(UpperCamelCase__ ) return mapping def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' UpperCAmelCase_ = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: UpperCAmelCase_ = full_content[1:].index('''---''' ) + 1 UpperCAmelCase_ = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_UpperCamelCase ) class lowerCamelCase ( lowercase_ ): '''simple docstring''' lowerCAmelCase__ = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def lowerCAmelCase__ ( cls : List[Any] , UpperCAmelCase__ : Path ) ->Any: with open(UpperCamelCase__ , encoding='''utf-8''' ) as readme_file: UpperCAmelCase_ , UpperCAmelCase_ = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCamelCase__ ) else: return cls() def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Path ) ->Union[str, Any]: if path.exists(): with open(UpperCamelCase__ , encoding='''utf-8''' ) as readme_file: UpperCAmelCase_ = readme_file.read() else: UpperCAmelCase_ = None UpperCAmelCase_ = self._to_readme(UpperCamelCase__ ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(UpperCamelCase__ ) def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Optional[str] = None ) ->Dict: if readme_content is not None: UpperCAmelCase_ , UpperCAmelCase_ = _split_yaml_from_readme(UpperCamelCase__ ) UpperCAmelCase_ = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: UpperCAmelCase_ = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def lowerCAmelCase__ ( cls : int , UpperCAmelCase__ : str ) ->Dict: UpperCAmelCase_ = yaml.load(UpperCamelCase__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields UpperCAmelCase_ = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCamelCase__ ) def lowerCAmelCase__ ( self : int ) ->int: return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCamelCase__ , allow_unicode=UpperCamelCase__ , encoding='''utf-8''' , ).decode('''utf-8''' ) lowercase__ : Union[str, Any] = { "image-classification": [], "translation": [], "image-segmentation": [], "fill-mask": [], "automatic-speech-recognition": [], "token-classification": [], 
"sentence-similarity": [], "audio-classification": [], "question-answering": [], "summarization": [], "zero-shot-classification": [], "table-to-text": [], "feature-extraction": [], "other": [], "multiple-choice": [], "text-classification": [], "text-to-image": [], "text2text-generation": [], "zero-shot-image-classification": [], "tabular-classification": [], "tabular-regression": [], "image-to-image": [], "tabular-to-text": [], "unconditional-image-generation": [], "text-retrieval": [], "text-to-speech": [], "object-detection": [], "audio-to-audio": [], "text-generation": [], "conversational": [], "table-question-answering": [], "visual-question-answering": [], "image-to-text": [], "reinforcement-learning": [], "voice-activity-detection": [], "time-series-forecasting": [], "document-question-answering": [], } if __name__ == "__main__": from argparse import ArgumentParser lowercase__ : Union[str, Any] = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.") ap.add_argument("readme_filepath") lowercase__ : List[str] = ap.parse_args() lowercase__ : Union[str, Any] = Path(args.readme_filepath) lowercase__ : Optional[int] = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
710
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowercase__ : Tuple = pytest.mark.integration @pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] ) def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ): '''simple docstring''' inspect_dataset(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.parametrize('''path''' , ['''accuracy'''] ) def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ): '''simple docstring''' inspect_metric(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.parametrize( '''path, config_name, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ): '''simple docstring''' with pytest.raises(_UpperCamelCase ): get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase ) @pytest.mark.parametrize( '''path, expected''' , [ ('''squad''', '''plain_text'''), ('''acronym_identification''', '''default'''), ('''lhoestq/squad''', '''plain_text'''), ('''lhoestq/test''', '''default'''), ('''lhoestq/demo1''', '''lhoestq--demo1'''), ('''dalle-mini/wit''', '''dalle-mini--wit'''), ] , ) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = get_dataset_config_names(_UpperCamelCase ) assert expected in config_names @pytest.mark.parametrize( '''path, expected_configs, expected_splits_in_first_config''' , [ ('''squad''', ['''plain_text'''], ['''train''', '''validation''']), ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']), ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase ) assert list(infos.keys() ) == expected_configs UpperCAmelCase_ = expected_configs[0] assert expected_config in infos UpperCAmelCase_ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( '''path, expected_config, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), 
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : str ): '''simple docstring''' UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase ) assert expected_config in infos UpperCAmelCase_ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ): '''simple docstring''' with pytest.raises(_UpperCamelCase ): get_dataset_split_names(_UpperCamelCase , config_name=_UpperCamelCase )
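For quick orientation, a minimal usage sketch of the helpers these tests exercise, assuming network access to the Hugging Face Hub; the dataset and config names mirror the parametrized cases above.

from datasets import get_dataset_config_names, get_dataset_split_names

# List the configurations of a Hub dataset ("plain_text" for squad).
print(get_dataset_config_names("squad"))

# List the splits of one configuration.
print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']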
43
0
'''simple docstring''' def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ): '''simple docstring''' if _UpperCamelCase == "": return [] UpperCAmelCase_ = [] for temp in range(int(_UpperCamelCase ) ): series.append(F"""1/{temp + 1}""" if series else '''1''' ) return series if __name__ == "__main__": lowercase__ : Tuple = input("Enter the last number (nth term) of the Harmonic Series") print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n") print(harmonic_series(nth_term))
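A quick sanity check of the series builder, assuming it is exposed under the `harmonic_series` name that the `__main__` block calls (the dump binds the definition to a renamed identifier); terms are returned as fraction strings, not evaluated.

assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []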
711
'''simple docstring''' import collections import os import re from pathlib import Path lowercase__ : List[Any] = "src/transformers" # Matches is_xxx_available() lowercase__ : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} lowercase__ : Any = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase__ : Union[str, Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") lowercase__ : List[str] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase__ : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", lowercase__ : List[Any] = re.compile(R"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], lowercase__ : Optional[Any] = re.compile(R"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo lowercase__ : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: lowercase__ : int = re.compile(R"^\s*try:") # Catches a line with else: lowercase__ : Any = re.compile(R"^\s*else:") def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ): '''simple docstring''' if _re_test_backend.search(_UpperCamelCase ) is None: return None UpperCAmelCase_ = [b[0] for b in _re_backend.findall(_UpperCamelCase )] backends.sort() return "_and_".join(_UpperCamelCase ) def __lowerCamelCase ( _UpperCamelCase : int ): '''simple docstring''' with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase_ = f.readlines() UpperCAmelCase_ = 0 while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure UpperCAmelCase_ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCAmelCase_ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_UpperCamelCase ): UpperCAmelCase_ = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0] UpperCAmelCase_ = re.findall(R'''\[([^\]]+)\]''' , _UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCAmelCase_ = _re_import_struct_key_value.search(_UpperCamelCase ) if single_line_import_search is not None: UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCAmelCase_ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
UpperCAmelCase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCAmelCase_ = lines[line_index] if _re_import_struct_add_one.search(_UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None: UpperCAmelCase_ = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(''', ''' ) UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif _re_between_brackets.search(_UpperCamelCase ) is not None: UpperCAmelCase_ = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(''', ''' ) UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif _re_quote_object.search(_UpperCamelCase ) is not None: objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 UpperCAmelCase_ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCAmelCase_ = [] while ( line_index < len(_UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCAmelCase_ = lines[line_index] UpperCAmelCase_ = _re_import.search(_UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCAmelCase_ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCAmelCase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCAmelCase_ = lines[line_index] UpperCAmelCase_ = _re_import.search(_UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 UpperCAmelCase_ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ): '''simple docstring''' def find_duplicates(_UpperCamelCase : Tuple ): return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCAmelCase_ = [] for key in import_dict_objects.keys(): UpperCAmelCase_ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) UpperCAmelCase_ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCAmelCase_ = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = [] for root, _, files in os.walk(_UpperCamelCase ): if "__init__.py" in files: UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''__init__.py''' ) UpperCAmelCase_ = parse_init(_UpperCamelCase ) if objects is not None: UpperCAmelCase_ = analyze_results(*_UpperCamelCase ) if len(_UpperCamelCase ) > 0: UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(_UpperCamelCase ) ) if len(_UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(_UpperCamelCase ) ) def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = [] for path, directories, files in os.walk(_UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCAmelCase_ = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) ) UpperCAmelCase_ = short_path.replace(os.path.sep , '''.''' ) submodules.append(_UpperCamelCase ) for fname in files: if fname == "__init__.py": continue UpperCAmelCase_ = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) ) UpperCAmelCase_ = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_UpperCamelCase ) return submodules lowercase__ : Union[str, Any] = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def __lowerCamelCase ( ): '''simple docstring''' from transformers.utils import direct_transformers_import UpperCAmelCase_ = direct_transformers_import(_UpperCamelCase ) UpperCAmelCase_ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(_UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: UpperCAmelCase_ = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _UpperCamelCase ) ) ) UpperCAmelCase_ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_UpperCamelCase ) > 0: UpperCAmelCase_ = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
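A hedged end-to-end check of the parser, assuming the upstream (de-obfuscated) `parse_init`/`analyze_results`; the `configuration_foo`/`FooConfig` names are made up for the example.

import tempfile
import textwrap

sample_init = textwrap.dedent(
    """\
    _import_structure = {"configuration_foo": ["FooConfig"]}

    if TYPE_CHECKING:
        from .configuration_foo import FooConfig
    """
)
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write(sample_init)

# Both halves of the init define the same objects, so no errors are reported.
import_dict, type_hints = parse_init(f.name)
print(analyze_results(import_dict, type_hints))  # expected: []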
43
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowercase__ : str = logging.get_logger(__name__) lowercase__ : int = "▁" lowercase__ : Tuple = {"vocab_file": "sentencepiece.bpe.model"} lowercase__ : List[Any] = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), } } lowercase__ : int = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } # fmt: off lowercase__ : int = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = ['''input_ids''', '''attention_mask'''] lowerCAmelCase__ = [] lowerCAmelCase__ = [] def __init__( self : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str="<s>" , UpperCAmelCase__ : Optional[int]="</s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : int="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : List[Any]="<mask>" , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Union[str, Any] , ) ->Dict: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) UpperCAmelCase_ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token UpperCAmelCase_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCAmelCase_ = 1 UpperCAmelCase_ = len(self.sp_model ) UpperCAmelCase_ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase ) } UpperCAmelCase_ = {v: k for k, v in self.lang_code_to_id.items()} UpperCAmelCase_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) UpperCAmelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCAmelCase_ = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) UpperCAmelCase_ = src_lang if src_lang is not None else '''en_XX''' UpperCAmelCase_ = self.lang_code_to_id[self._src_lang] UpperCAmelCase_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Union[str, Any] ) ->Optional[int]: UpperCAmelCase_ = self.__dict__.copy() UpperCAmelCase_ = None UpperCAmelCase_ = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[str] , UpperCAmelCase__ : Optional[Any] ) ->Optional[int]: UpperCAmelCase_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ = {} UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowerCAmelCase__ ( self : List[str] ) ->str: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: return self._src_lang @src_lang.setter def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : List[str] ) ->Any: UpperCAmelCase_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : Optional[Any] = False ) ->int: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) UpperCAmelCase_ = [1] * len(self.prefix_tokens ) UpperCAmelCase_ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict = None ) ->Dict: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict = None ) ->Optional[Any]: UpperCAmelCase_ = [self.sep_token_id] UpperCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : int , 
UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] ) ->Tuple: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCAmelCase_ = src_lang UpperCAmelCase_ = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) UpperCAmelCase_ = self.convert_tokens_to_ids(_UpperCAmelCase ) UpperCAmelCase_ = tgt_lang_id return inputs def lowerCAmelCase__ ( self : List[str] ) ->int: UpperCAmelCase_ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Optional[int] ) ->List[str]: return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : str ) ->Optional[Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase_ = self.sp_model.PieceToId(_UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : Dict ) ->int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : List[str] ) ->Any: UpperCAmelCase_ = ''''''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ''' ''' ).strip() return out_string def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] = None ) ->Any: if not os.path.isdir(_UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase_ = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , '''wb''' ) as fi: UpperCAmelCase_ = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int = "en_XX" , UpperCAmelCase__ : Dict = None , UpperCAmelCase__ : Optional[Any] = "ro_RO" , **UpperCAmelCase__ : Optional[int] , ) ->Any: UpperCAmelCase_ = src_lang UpperCAmelCase_ = tgt_lang return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]: return self.set_src_lang_special_tokens(self.src_lang ) def lowerCAmelCase__ ( self : Optional[Any] ) ->str: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : str ) ->Tuple: UpperCAmelCase_ = self.lang_code_to_id[src_lang] UpperCAmelCase_ = [] UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code] def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : str ) ->Optional[int]: UpperCAmelCase_ = self.lang_code_to_id[lang] UpperCAmelCase_ = [] UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
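A usage sketch against the upstream `MBartTokenizer` (which this snippet mirrors); the checkpoint name comes from the snippet's own pretrained map.

from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria", return_tensors="pt")
# Source ids end with </s> followed by the source language code (en_XX).
print(batch["input_ids"])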
712
'''simple docstring''' from __future__ import annotations def __lowerCamelCase ( _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ = position UpperCAmelCase_ = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] UpperCAmelCase_ = [] for position in positions: UpperCAmelCase_ , UpperCAmelCase_ = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_UpperCamelCase ) return permissible_positions def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ): '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def __lowerCamelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ): '''simple docstring''' if is_complete(_UpperCamelCase ): return True for position in get_valid_pos(_UpperCamelCase , len(_UpperCamelCase ) ): UpperCAmelCase_ , UpperCAmelCase_ = position if board[y][x] == 0: UpperCAmelCase_ = curr + 1 if open_knight_tour_helper(_UpperCamelCase , _UpperCamelCase , curr + 1 ): return True UpperCAmelCase_ = 0 return False def __lowerCamelCase ( _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): UpperCAmelCase_ = 1 if open_knight_tour_helper(_UpperCamelCase , (i, j) , 1 ): return board UpperCAmelCase_ = 0 UpperCAmelCase_ = F"""Open Knight Tour cannot be performed on a board of size {n}""" raise ValueError(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
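A small driver, assuming the solver is exposed as `open_knight_tour` (its upstream name); n = 5 is the smallest non-trivial square board that admits an open tour.

board = open_knight_tour(5)
for row in board:
    print(row)  # each cell holds the move number 1..25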
43
0
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __lowerCamelCase ( _UpperCamelCase : Any=None , _UpperCamelCase : Dict=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=UpperCAmelCase__ ) @dataclass class lowerCamelCase : '''simple docstring''' lowerCAmelCase__ = field( metadata={'''help''': '''The csv file to plot.'''} , ) lowerCAmelCase__ = field( default=_a , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , ) lowerCAmelCase__ = field( default=_a , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , ) lowerCAmelCase__ = field( default=_a , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , ) lowerCAmelCase__ = field( default=_a , metadata={ '''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.''' } , ) lowerCAmelCase__ = field( default=_a , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , ) lowerCAmelCase__ = list_field( default=_a , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} ) def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' try: int(UpperCAmelCase__ ) return True except ValueError: return False def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' try: float(UpperCAmelCase__ ) return True except ValueError: return False class lowerCamelCase : '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]: UpperCAmelCase_ = args UpperCAmelCase_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline='''''' ) as csv_file: UpperCAmelCase_ = csv.DictReader(_A ) for row in reader: UpperCAmelCase_ = row['''model'''] self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) ) self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) ) if can_convert_to_int(row['''result'''] ): # value is not None UpperCAmelCase_ = int(row['''result'''] ) elif can_convert_to_float(row['''result'''] ): # value is not None UpperCAmelCase_ = float(row['''result'''] ) def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple: UpperCAmelCase_ , UpperCAmelCase_ = plt.subplots() UpperCAmelCase_ = '''Time usage''' if self.args.is_time else '''Memory usage''' UpperCAmelCase_ = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference''' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('''log''' ) ax.set_yscale('''log''' ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): UpperCAmelCase_ = sorted(set(self.result_dict[model_name]['''bsz'''] ) ) UpperCAmelCase_ = sorted(set(self.result_dict[model_name]['''seq_len'''] ) ) UpperCAmelCase_ = self.result_dict[model_name]['''result'''] ((UpperCAmelCase_) , (UpperCAmelCase_)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) UpperCAmelCase_ = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) 
for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: UpperCAmelCase_ = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_A , ) else: UpperCAmelCase_ = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((UpperCAmelCase_) , (UpperCAmelCase_)) = ( ('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''') ) UpperCAmelCase_ = np.asarray(_A , _A )[: len(_A )] plt.scatter( _A , _A , label=f"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" ) plt.plot(_A , _A , '''--''' ) title_str += f""" {label_model_name} vs.""" UpperCAmelCase_ = title_str[:-4] UpperCAmelCase_ = '''Time in s''' if self.args.is_time else '''Memory in MB''' # plot plt.title(_A ) plt.xlabel(_A ) plt.ylabel(_A ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = HfArgumentParser(UpperCAmelCase__ ) UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0] UpperCAmelCase_ = Plot(args=UpperCAmelCase__ ) plot.plot() if __name__ == "__main__": main()
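A hedged end-to-end example of feeding the plotter: first write a CSV with the four columns the reader expects (the model name and result values here are illustrative), then invoke the script.

import csv

rows = [
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 1254},
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 512, "result": 4310},
]
with open("benchmark.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)

# Then: python plot_csv_file.py --csv_file benchmark.csv --figure_png_file memory.png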
713
'''simple docstring''' from __future__ import annotations from typing import TypedDict class lowerCamelCase ( TypedDict ): '''simple docstring''' lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError('''The parameter s type must be str.''' ) return [s[i:] + s[:i] for i in range(len(_UpperCamelCase ) )] def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError('''The parameter s type must be str.''' ) if not s: raise ValueError('''The parameter s must not be empty.''' ) UpperCAmelCase_ = all_rotations(_UpperCamelCase ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation UpperCAmelCase_ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(_UpperCamelCase ), } return response def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError('''The parameter bwt_string type must be str.''' ) if not bwt_string: raise ValueError('''The parameter bwt_string must not be empty.''' ) try: UpperCAmelCase_ = int(_UpperCamelCase ) except ValueError: raise TypeError( '''The parameter idx_original_string type must be int or castable''' ''' to int.''' ) if idx_original_string < 0: raise ValueError('''The parameter idx_original_string must not be lower than 0.''' ) if idx_original_string >= len(_UpperCamelCase ): raise ValueError( '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' ) UpperCAmelCase_ = [''''''] * len(_UpperCamelCase ) for _ in range(len(_UpperCamelCase ) ): for i in range(len(_UpperCamelCase ) ): UpperCAmelCase_ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowercase__ : Optional[int] = "Provide a string and I will generate its BWT transform: " lowercase__ : List[Any] = input(entry_msg).strip() lowercase__ : Any = bwt_transform(s) print( F'''Burrows Wheeler transform for string \'{s}\' results ''' F'''in \'{result['bwt_string']}\'''' ) lowercase__ : Dict = reverse_bwt(result["bwt_string"], result["idx_original_string"]) print( F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' ''' F'''we get original string \'{original_string}\'''' )
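A worked round trip (matching the upstream doctests); '^' acts as a sentinel so the inverse transform is unambiguous.

result = bwt_transform("^BANANA")
print(result)  # {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}
print(reverse_bwt(result["bwt_string"], result["idx_original_string"]))  # ^BANANA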
43
0
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore lowercase__ : Optional[Any] = "\nHuman: <<task>>\n\nAssistant: " lowercase__ : Union[str, Any] = "huggingface-tools/default-prompts" lowercase__ : Dict = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"} def __lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str]="run" ): '''simple docstring''' if prompt_or_repo_id is None: UpperCAmelCase_ = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search('''\\s''' , UpperCAmelCase__ ) is not None: return prompt_or_repo_id UpperCAmelCase_ = cached_file( UpperCAmelCase__ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} ) with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' ) as f: return f.read()
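A minimal sketch of calling the helper, assuming it is exposed as `download_prompt` (its upstream name) and the default prompts repo is reachable.

# Resolve the default "run" prompt template for an agent.
template = download_prompt(None, "MyAgent", mode="run")

# A literal prompt (anything containing whitespace) is returned unchanged.
inline = download_prompt("Human: <<task>>\n\nAssistant: ", "MyAgent")
assert inline.startswith("Human:")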
714
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase__ : Union[str, Any] = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Union[str, Any] = ["MobileViTFeatureExtractor"] lowercase__ : List[Any] = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys lowercase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
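Because of the `_LazyModule` indirection at the bottom, the heavy submodules are only imported on first attribute access; a hedged sketch of the effect:

import transformers  # cheap: model code is not imported yet

config_cls = transformers.MobileViTConfig  # first access pulls in configuration_mobilevit
model_cls = transformers.MobileViTModel    # pulls in modeling_mobilevit (requires torch)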
43
0
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors lowercase__ : Dict = logging.getLogger(__name__) class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = """sequence-classification""" def __init__( self : Any , UpperCAmelCase__ : List[str] ) ->Dict: if type(UpperCAmelCase__ ) == dict: UpperCAmelCase_ = Namespace(**UpperCAmelCase__ ) UpperCAmelCase_ = glue_output_modes[hparams.task] UpperCAmelCase_ = glue_tasks_num_labels[hparams.task] super().__init__(UpperCAmelCase__ , UpperCAmelCase__ , self.mode ) def lowerCAmelCase__ ( self : Any , **UpperCAmelCase__ : int ) ->Tuple: return self.model(**UpperCAmelCase__ ) def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str ) ->Optional[Any]: UpperCAmelCase_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: UpperCAmelCase_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None UpperCAmelCase_ = self(**UpperCAmelCase__ ) UpperCAmelCase_ = outputs[0] UpperCAmelCase_ = self.trainer.lr_schedulers[0]['''scheduler'''] UpperCAmelCase_ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]: UpperCAmelCase_ = self.hparams UpperCAmelCase_ = processors[args.task]() UpperCAmelCase_ = processor.get_labels() for mode in ["train", "dev"]: UpperCAmelCase_ = self._feature_file(UpperCAmelCase__ ) if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , UpperCAmelCase__ ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) UpperCAmelCase_ = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else processor.get_train_examples(args.data_dir ) ) UpperCAmelCase_ = convert_examples_to_features( UpperCAmelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , UpperCAmelCase__ ) torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] = False ) ->Union[str, Any]: UpperCAmelCase_ = '''dev''' if mode == '''test''' else mode UpperCAmelCase_ = self._feature_file(UpperCAmelCase__ ) logger.info('''Loading features from cached file %s''' , UpperCAmelCase__ ) UpperCAmelCase_ = torch.load(UpperCAmelCase__ ) UpperCAmelCase_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) UpperCAmelCase_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) UpperCAmelCase_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": UpperCAmelCase_ = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == 
"regression": UpperCAmelCase_ = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , batch_size=UpperCAmelCase__ , shuffle=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ) ->int: UpperCAmelCase_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: UpperCAmelCase_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None UpperCAmelCase_ = self(**UpperCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = outputs[:2] UpperCAmelCase_ = logits.detach().cpu().numpy() UpperCAmelCase_ = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[str] ) ->Tuple: UpperCAmelCase_ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() UpperCAmelCase_ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": UpperCAmelCase_ = np.argmax(UpperCAmelCase__ , axis=1 ) elif self.hparams.glue_output_mode == "regression": UpperCAmelCase_ = np.squeeze(UpperCAmelCase__ ) UpperCAmelCase_ = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) UpperCAmelCase_ = [[] for _ in range(out_label_ids.shape[0] )] UpperCAmelCase_ = [[] for _ in range(out_label_ids.shape[0] )] UpperCAmelCase_ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , UpperCAmelCase__ , UpperCAmelCase__ )} UpperCAmelCase_ = dict(results.items() ) UpperCAmelCase_ = results return ret, preds_list, out_label_list def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Optional[Any] ) ->Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._eval_end(UpperCAmelCase__ ) UpperCAmelCase_ = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) ->str: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._eval_end(UpperCAmelCase__ ) UpperCAmelCase_ = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def lowerCAmelCase__ ( UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ) ->Union[str, Any]: BaseTransformer.add_model_specific_args(UpperCAmelCase__ , UpperCAmelCase__ ) parser.add_argument( '''--max_seq_length''' , default=128 , type=UpperCAmelCase__ , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=UpperCAmelCase__ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = argparse.ArgumentParser() add_generic_args(_UpperCamelCase , os.getcwd() ) UpperCAmelCase_ = GLUETransformer.add_model_specific_args(_UpperCamelCase , os.getcwd() ) UpperCAmelCase_ = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: UpperCAmelCase_ = os.path.join( '''./results''' , F"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) UpperCAmelCase_ = GLUETransformer(_UpperCamelCase ) UpperCAmelCase_ = generic_train(_UpperCamelCase , _UpperCamelCase ) # Optionally, predict on dev set and write to output_dir if args.do_predict: UpperCAmelCase_ = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=_UpperCamelCase ) ) UpperCAmelCase_ = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_UpperCamelCase ) if __name__ == "__main__": main()
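An illustrative launch of the Lightning GLUE script above; `--task`, `--gpus` and `--max_seq_length` are defined in this file, while the generic arguments (e.g. `--model_name_or_path`, `--output_dir`) come from the shared `lightning_base` parser. The script name and all paths are placeholders.

# python run_pl_glue.py \
#   --task mrpc \
#   --data_dir ./glue_data/MRPC \
#   --model_name_or_path bert-base-cased \
#   --output_dir ./results/mrpc \
#   --max_seq_length 128 \
#   --gpus 1 \
#   --do_predict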
715
'''simple docstring''' lowercase__ : Union[str, Any] = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowercase__ : str = [{"type": "code", "content": INSTALL_CONTENT}] lowercase__ : Any = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
43
0
'''simple docstring''' from math import factorial def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] = 100 ): '''simple docstring''' return sum(int(_UpperCamelCase ) for x in str(factorial(_UpperCamelCase ) ) ) if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
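This solves Project Euler problem 20 (digit sum of 100!); a quick check, assuming the function is exposed as `solution` as in the `__main__` block:

assert solution(10) == 27    # 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27
assert solution(100) == 648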
716
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase__ : Optional[Any] = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : List[str] = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
0
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowercase__ : Optional[Any] = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : Dict ): '''simple docstring''' UpperCAmelCase_ = WavaVecaForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) UpperCAmelCase_ = downstream_dict['''projector.weight'''] UpperCAmelCase_ = downstream_dict['''projector.bias'''] UpperCAmelCase_ = downstream_dict['''model.post_net.linear.weight'''] UpperCAmelCase_ = downstream_dict['''model.post_net.linear.bias'''] return model def __lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) UpperCAmelCase_ = downstream_dict['''model.linear.weight'''] UpperCAmelCase_ = downstream_dict['''model.linear.bias'''] return model def __lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : List[Any] ): '''simple docstring''' UpperCAmelCase_ = WavaVecaForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) UpperCAmelCase_ = downstream_dict['''connector.weight'''] UpperCAmelCase_ = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): UpperCAmelCase_ = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] UpperCAmelCase_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] UpperCAmelCase_ = downstream_dict['''objective.W'''] return model @torch.no_grad() def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Any ): '''simple docstring''' UpperCAmelCase_ = torch.load(_UpperCamelCase , map_location='''cpu''' ) UpperCAmelCase_ = checkpoint['''Downstream'''] UpperCAmelCase_ = WavaVecaConfig.from_pretrained(_UpperCamelCase ) UpperCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained( _UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase ) UpperCAmelCase_ = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): UpperCAmelCase_ = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) elif arch.endswith('''ForAudioFrameClassification''' ): UpperCAmelCase_ = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) elif arch.endswith('''ForXVector''' ): UpperCAmelCase_ = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: UpperCAmelCase_ = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(_UpperCamelCase ) hf_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowercase__ 
: Dict = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") lowercase__ : Dict = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
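An illustrative invocation of the converter above; all four flags are defined by its argument parser, while the script name and paths are placeholders.

# python convert_s3prl_checkpoint.py \
#   --base_model_name facebook/wav2vec2-base \
#   --config_path ./classifier_config.json \
#   --checkpoint_path ./s3prl_checkpoint.pt \
#   --model_dump_path ./converted_model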
717
'''simple docstring''' from heapq import heappop, heappush import numpy as np def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : bool , ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ = grid.shape UpperCAmelCase_ = [-1, 1, 0, 0] UpperCAmelCase_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] UpperCAmelCase_ , UpperCAmelCase_ = [(0, source)], set() UpperCAmelCase_ = np.full((rows, cols) , np.inf ) UpperCAmelCase_ = 0 UpperCAmelCase_ = np.empty((rows, cols) , dtype=_UpperCamelCase ) UpperCAmelCase_ = None while queue: ((UpperCAmelCase_) , (UpperCAmelCase_)) = heappop(_UpperCamelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: UpperCAmelCase_ = [] while (x, y) != source: path.append((x, y) ) UpperCAmelCase_ , UpperCAmelCase_ = predecessors[x, y] path.append(_UpperCamelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(_UpperCamelCase ) ): UpperCAmelCase_ , UpperCAmelCase_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: UpperCAmelCase_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(_UpperCamelCase , (dist + 1, (nx, ny)) ) UpperCAmelCase_ = dist + 1 UpperCAmelCase_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
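A hedged driver for the grid search, written against the upstream `dijkstra(grid, source, destination, allow_diagonal)` signature (the dump's renamed locals would not run as-is); cells equal to 1 are passable.

import numpy as np

grid = np.array([[1, 1, 1],
                 [0, 0, 1],
                 [1, 1, 1]])
dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
print(dist)  # 6.0 -- right twice, down twice, left twice
print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]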
43
0
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") lowercase__ : Optional[Any] = logging.getLogger(__name__) @dataclass class lowerCamelCase : '''simple docstring''' lowerCAmelCase__ = field( default=1_28 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) lowerCAmelCase__ = field( default=UpperCamelCase_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) lowerCAmelCase__ = field( default=UpperCamelCase_ , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) lowerCAmelCase__ = field( default=UpperCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase__ = field( default=UpperCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase__ = field( default=UpperCamelCase_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of prediction examples to this ''' '''value if set.''' ) } , ) @dataclass class lowerCamelCase : '''simple docstring''' lowerCAmelCase__ = field( default=UpperCamelCase_ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) lowerCAmelCase__ = field( default=UpperCamelCase_ , metadata={'''help''': '''Evaluation language. 
Also train language if `train_language` is set to None.'''} )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    lowerCAmelCase__ = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    lowerCAmelCase__ = field(
        default=UpperCamelCase_ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase_ = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_xnli''' , snake_case_ )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    UpperCAmelCase_ = training_args.get_process_log_level()
    logger.setLevel(snake_case_ )
    datasets.utils.logging.set_verbosity(snake_case_ )
    transformers.utils.logging.set_verbosity(snake_case_ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    UpperCAmelCase_ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        UpperCAmelCase_ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            UpperCAmelCase_ = load_dataset(
                '''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            UpperCAmelCase_ = load_dataset(
                '''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        UpperCAmelCase_ = train_dataset.features['''label'''].names

    if training_args.do_eval:
        UpperCAmelCase_ = load_dataset(
            '''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        UpperCAmelCase_ = eval_dataset.features['''label'''].names

    if training_args.do_predict:
        UpperCAmelCase_ = load_dataset(
            '''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        UpperCAmelCase_ = predict_dataset.features['''label'''].names

    # Labels
    UpperCAmelCase_ = len(snake_case_ )

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase_ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case_ , idalabel={str(snake_case_ ): label for i, label in enumerate(snake_case_ )} , labelaid={label: i for i, label in enumerate(snake_case_ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase_ = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        UpperCAmelCase_ = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        UpperCAmelCase_ = False

    def preprocess_function(_UpperCamelCase : Union[str, Any] ):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''] , examples['''hypothesis'''] , padding=snake_case_ , max_length=data_args.max_seq_length , truncation=snake_case_ , )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            UpperCAmelCase_ = min(len(snake_case_ ) , data_args.max_train_samples )
            UpperCAmelCase_ = train_dataset.select(range(snake_case_ ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            UpperCAmelCase_ = train_dataset.map(
                snake_case_ , batched=snake_case_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(snake_case_ ) ) , 3 ):
            logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            UpperCAmelCase_ = min(len(snake_case_ ) , data_args.max_eval_samples )
            UpperCAmelCase_ = eval_dataset.select(range(snake_case_ ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            UpperCAmelCase_ = eval_dataset.map(
                snake_case_ , batched=snake_case_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            UpperCAmelCase_ = min(len(snake_case_ ) , data_args.max_predict_samples )
            UpperCAmelCase_ = predict_dataset.select(range(snake_case_ ) )
        with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
            UpperCAmelCase_ = predict_dataset.map(
                snake_case_ , batched=snake_case_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )

    # Get the metric function
    UpperCAmelCase_ = evaluate.load('''xnli''' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(_UpperCamelCase : EvalPrediction ):
        UpperCAmelCase_ = p.predictions[0] if isinstance(p.predictions , snake_case_ ) else p.predictions
        UpperCAmelCase_ = np.argmax(snake_case_ , axis=1 )
        return metric.compute(predictions=snake_case_ , references=p.label_ids )

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        UpperCAmelCase_ = default_data_collator
    elif training_args.fpaa:
        UpperCAmelCase_ = DataCollatorWithPadding(snake_case_ , pad_to_multiple_of=8 )
    else:
        UpperCAmelCase_ = None

    # Initialize our Trainer
    UpperCAmelCase_ = Trainer(
        model=snake_case_ , args=snake_case_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=snake_case_ , tokenizer=snake_case_ , data_collator=snake_case_ , )

    # Training
    if training_args.do_train:
        UpperCAmelCase_ = None
        if training_args.resume_from_checkpoint is not None:
            UpperCAmelCase_ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            UpperCAmelCase_ = last_checkpoint
        UpperCAmelCase_ = trainer.train(resume_from_checkpoint=snake_case_ )
        UpperCAmelCase_ = train_result.metrics
        UpperCAmelCase_ = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case_ )
        )
        UpperCAmelCase_ = min(snake_case_ , len(snake_case_ ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics('''train''' , snake_case_ )
        trainer.save_metrics('''train''' , snake_case_ )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase_ = trainer.evaluate(eval_dataset=snake_case_ )

        UpperCAmelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case_ )
        UpperCAmelCase_ = min(snake_case_ , len(snake_case_ ) )

        trainer.log_metrics('''eval''' , snake_case_ )
        trainer.save_metrics('''eval''' , snake_case_ )

    # Prediction
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        UpperCAmelCase_ = trainer.predict(snake_case_ , metric_key_prefix='''predict''' )

        UpperCAmelCase_ = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(snake_case_ )
        )
        UpperCAmelCase_ = min(snake_case_ , len(snake_case_ ) )

        trainer.log_metrics('''predict''' , snake_case_ )
        trainer.save_metrics('''predict''' , snake_case_ )

        UpperCAmelCase_ = np.argmax(snake_case_ , axis=1 )
        UpperCAmelCase_ = os.path.join(training_args.output_dir , '''predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(snake_case_ , '''w''' ) as writer:
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(snake_case_ ):
                    UpperCAmelCase_ = label_list[item]
                    writer.write(F"""{index}\t{item}\n""" )


if __name__ == "__main__":
    main()
718
'''simple docstring'''
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''
    lowerCAmelCase__ = XLMTokenizer
    lowerCAmelCase__ = False

    def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ]
        UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]:
        UpperCAmelCase_ = '''lower newer'''
        UpperCAmelCase_ = '''lower newer'''
        return input_text, output_text

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file )
        UpperCAmelCase_ = '''lower'''
        UpperCAmelCase_ = ['''low''', '''er</w>''']
        UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = tokens + ['''<unk>''']
        UpperCAmelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Any ) ->str:
        UpperCAmelCase_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
43
0
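# Illustrative sketch (not part of the dataset rows above): the `run_xnli` snippet in the previous
# row builds its CLI with `HfArgumentParser` over dataclasses. A minimal, self-contained version of
# that pattern looks like this; the `Args` dataclass and its fields are hypothetical names chosen
# only for the demo.
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class Args:
    # Evaluation language for an XNLI-style run; also used for training when no train language is set.
    language: str = field(default="en", metadata={"help": "Evaluation language."})
    max_seq_length: int = field(default=128, metadata={"help": "Maximum total input sequence length."})


if __name__ == "__main__":
    # parse_args_into_dataclasses returns one populated instance per dataclass given to the parser.
    (args,) = HfArgumentParser(Args).parse_args_into_dataclasses()
    print(args.language, args.max_seq_length)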
'''simple docstring'''
import os
import sys

lowercase__ : List[Any] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

lowercase__ : Any = [
    """torch""",
    """numpy""",
    """tokenizers""",
    """filelock""",
    """requests""",
    """tqdm""",
    """regex""",
    """sentencepiece""",
    """sacremoses""",
    """importlib_metadata""",
    """huggingface_hub""",
]


@add_start_docstrings(AutoConfig.__doc__ )
def __lowerCamelCase ( *_UpperCamelCase : int , **_UpperCamelCase : int ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )


@add_start_docstrings(AutoTokenizer.__doc__ )
def __lowerCamelCase ( *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )


@add_start_docstrings(AutoModel.__doc__ )
def __lowerCamelCase ( *_UpperCamelCase : str , **_UpperCamelCase : str ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )


@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __lowerCamelCase ( *_UpperCamelCase : Optional[int] , **_UpperCamelCase : List[str] ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )


@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __lowerCamelCase ( *_UpperCamelCase : str , **_UpperCamelCase : List[Any] ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )


@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __lowerCamelCase ( *_UpperCamelCase : int , **_UpperCamelCase : Any ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __lowerCamelCase ( *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Optional[int] ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
719
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    for param in module.parameters():
        UpperCAmelCase_ = False


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        UpperCAmelCase_ = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device


def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ = plt.imshow(_UpperCamelCase )
    fig.axes.get_xaxis().set_visible(_UpperCamelCase )
    fig.axes.get_yaxis().set_visible(_UpperCamelCase )
    plt.show()


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = datetime.now()
    UpperCAmelCase_ = current_time.strftime('''%H:%M:%S''' )
    return timestamp
43
0
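# Illustrative sketch (not part of the dataset rows above): the device helper in the previous row
# prefers CUDA over CPU and then lets Apple's MPS backend override both when it is built and
# available. The same selection logic in a compact, runnable form:
import torch


def pick_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # MPS takes precedence in the original helper (which then warns about backprop issues on MPS).
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    return device


if __name__ == "__main__":
    print(pick_device())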
'''simple docstring'''
import sys

lowercase__ : Dict = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] = N ):
    '''simple docstring'''
    UpperCAmelCase_ = -sys.maxsize - 1
    for i in range(len(lowerCamelCase__ ) - 12 ):
        UpperCAmelCase_ = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            UpperCAmelCase_ = product
    return largest_product


if __name__ == "__main__":
    print(F'''{solution() = }''')
720
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase__ ( self : str ) ->List[str]:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        sd_pipe.set_scheduler('''sample_euler''' )
        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCAmelCase__ ( self : List[str] ) ->int:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        sd_pipe.set_scheduler('''sample_euler''' )
        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCAmelCase__ , )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
43
0
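# Illustrative sketch (not part of the dataset rows above): the Project Euler snippet in the
# previous row scans every window of 13 adjacent digits and multiplies them. The same brute-force
# scan on a small input with a window of 4; for "123456" the best window is "3456" with product 360.
from functools import reduce


def largest_window_product(digits: str, width: int) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        # Multiply the `width` digits starting at position i.
        product = reduce(lambda acc, ch: acc * int(ch), digits[i : i + width], 1)
        best = max(best, product)
    return best


assert largest_window_product("123456", 4) == 3 * 4 * 5 * 6  # 360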
'''simple docstring'''
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load

from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument('''--model_ckpt''' , type=_UpperCamelCase , default='''microsoft/unixcoder-base-nine''' )
    parser.add_argument('''--num_epochs''' , type=_UpperCamelCase , default=5 )
    parser.add_argument('''--batch_size''' , type=_UpperCamelCase , default=6 )
    parser.add_argument('''--gradient_accumulation_steps''' , type=_UpperCamelCase , default=1 )
    parser.add_argument('''--freeze''' , type=_UpperCamelCase , default=_UpperCamelCase )
    parser.add_argument('''--learning_rate''' , type=_UpperCamelCase , default=5E-4 )
    parser.add_argument('''--seed''' , type=_UpperCamelCase , default=0 )
    parser.add_argument('''--lr_scheduler_type''' , type=_UpperCamelCase , default='''cosine''' )
    parser.add_argument('''--num_warmup_steps''' , type=_UpperCamelCase , default=10 )
    parser.add_argument('''--weight_decay''' , type=_UpperCamelCase , default=0.01 )
    parser.add_argument('''--output_dir''' , type=_UpperCamelCase , default='''./results''' )
    return parser.parse_args()


lowercase__ : Tuple = load("accuracy")


def __lowerCamelCase ( _UpperCamelCase : Optional[int] ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ = eval_pred
    UpperCAmelCase_ = np.argmax(_UpperCamelCase , axis=1 )
    return metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase )


class lowerCamelCase ( snake_case__ ):
    '''simple docstring'''

    def __init__( self : List[str] , UpperCAmelCase__ : Dict ) ->None:
        super().__init__()
        UpperCAmelCase_ = trainer

    def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str ) ->Optional[Any]:
        if control.should_evaluate:
            UpperCAmelCase_ = deepcopy(_SCREAMING_SNAKE_CASE )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
            return control_copy


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = get_args()
    set_seed(args.seed )
    UpperCAmelCase_ = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
    UpperCAmelCase_ = dataset.train_test_split(test_size=0.2 )
    UpperCAmelCase_ = train_test['''test'''].train_test_split(test_size=0.5 )
    UpperCAmelCase_ = DatasetDict(
        {
            '''train''': train_test['''train'''],
            '''test''': test_validation['''train'''],
            '''valid''': test_validation['''test'''],
        } )

    print('''Loading tokenizer and model''' )
    UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt )
    UpperCAmelCase_ = tokenizer.eos_token
    UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    UpperCAmelCase_ = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            UpperCAmelCase_ = False

    UpperCAmelCase_ = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )

    def tokenize(_UpperCamelCase : List[Any] ):
        UpperCAmelCase_ = tokenizer(example['''src'''] , truncation=_UpperCamelCase , max_length=1024 )
        UpperCAmelCase_ = labels.straint(example['''complexity'''] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    UpperCAmelCase_ = train_test_validation.map(
        _UpperCamelCase , batched=_UpperCamelCase , remove_columns=train_test_validation['''train'''].column_names , )
    UpperCAmelCase_ = DataCollatorWithPadding(tokenizer=_UpperCamelCase )
    UpperCAmelCase_ = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
    UpperCAmelCase_ = Trainer(
        model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , compute_metrics=_UpperCamelCase , )

    print('''Training...''' )
    trainer.add_callback(CustomCallback(_UpperCamelCase ) )
    trainer.train()


if __name__ == "__main__":
    main()
721
'''simple docstring'''
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''
    lowerCAmelCase__ = MvpTokenizer
    lowerCAmelCase__ = MvpTokenizerFast
    lowerCAmelCase__ = True
    lowerCAmelCase__ = filter_roberta_detectors

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        super().setUp()
        UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ]
        UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        UpperCAmelCase_ = {'''unk_token''': '''<unk>'''}
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase__ ( self : Tuple , **UpperCAmelCase__ : List[str] ) ->Dict:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : int ) ->Tuple:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]:
        return "lower newer", "lower newer"

    @cached_property
    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
        return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )

    @cached_property
    def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
        return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )

    @require_torch
    def lowerCAmelCase__ ( self : Any ) ->Dict:
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            UpperCAmelCase_ = batch.input_ids.tolist()[0]
            self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
            # Test that special tokens are reset

    @require_torch
    def lowerCAmelCase__ ( self : str ) ->int:
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' )
            # check if input_ids are returned and no labels
            self.assertIn('''input_ids''' , UpperCAmelCase__ )
            self.assertIn('''attention_mask''' , UpperCAmelCase__ )
            self.assertNotIn('''labels''' , UpperCAmelCase__ )
            self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ )

    @require_torch
    def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
        UpperCAmelCase_ = [ '''Summary of the text.''', '''Another summary.''', ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ) ->int:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            self.assertEqual(batch.input_ids.shape , (2, 1024) )

    @require_torch
    def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
        UpperCAmelCase_ = ['''A long paragraph for summarization.''']
        UpperCAmelCase_ = [ '''Summary of the text.''', ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , text_target=UpperCAmelCase__ , return_tensors='''pt''' )
            UpperCAmelCase_ = inputs['''input_ids''']
            UpperCAmelCase_ = inputs['''labels''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
        pass

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.'''
                UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
                UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
43
0
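# Illustrative sketch (not part of the dataset rows above): the complexity-classification script in
# the previous row builds a `datasets.ClassLabel` from its label strings and maps them to integer
# ids with str2int (which this dataset's identifier transform appears to render as `straint`).
# Minimal usage with hypothetical label names:
from datasets import ClassLabel

labels = ClassLabel(names=["constant", "linear", "quadratic"])
assert labels.str2int("linear") == 1
assert labels.int2str(2) == "quadratic"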
'''simple docstring'''
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
lowercase__ : List[Any] = object()

# For specifying empty leaf dict `{}`
lowercase__ : str = object()


def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ = tuple((re.compile(x + '''$''' ) for x in qs) )
    for i in range(len(_UpperCamelCase ) - len(_UpperCamelCase ) + 1 ):
        UpperCAmelCase_ = [x.match(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , ks[i:] )]
        if matches and all(_UpperCamelCase ):
            return True
    return False


def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    def replace(_UpperCamelCase : int , _UpperCamelCase : Optional[int] ):
        for rule, replacement in rules:
            if _match(_UpperCamelCase , _UpperCamelCase ):
                return replacement
        return val

    return replace


def __lowerCamelCase ( ):
    '''simple docstring'''
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , _UpperCamelCase )),
        (("transformer", "wte", "embedding"), P('''mp''' , _UpperCamelCase )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_UpperCamelCase , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , _UpperCamelCase )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(_UpperCamelCase , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , _UpperCamelCase )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    UpperCAmelCase_ = _get_partition_rules()
    UpperCAmelCase_ = _replacement_rules(_UpperCamelCase )
    UpperCAmelCase_ = {k: _unmatched for k in flatten_dict(_UpperCamelCase )}
    UpperCAmelCase_ = {k: replace(_UpperCamelCase , _UpperCamelCase ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(_UpperCamelCase ) )
700
'''simple docstring'''
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : list[str] ):
    '''simple docstring'''
    UpperCAmelCase_ = ''''''
    for word_or_phrase in separated:
        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(_UpperCamelCase )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
43
0
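# Illustrative sketch (not part of the dataset rows above): the partitioning helper in the previous
# row matches a tuple of regexes against any contiguous window of a parameter's key path. The same
# idea, self-contained, with hypothetical key paths:
import re


def match_path(patterns: tuple, keys: tuple) -> bool:
    # Anchor each pattern so it must match a whole path component.
    compiled = tuple(re.compile(p + "$") for p in patterns)
    for i in range(len(keys) - len(compiled) + 1):
        window = keys[i : i + len(compiled)]
        if all(rx.match(k) for rx, k in zip(compiled, window)):
            return True
    return False


assert match_path(("attention", "out_proj", "kernel"), ("transformer", "h", "0", "attention", "out_proj", "kernel"))
assert not match_path(("mlp", "c_fc", "bias"), ("transformer", "ln_f", "scale"))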
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowercase__ : Any = logging.get_logger(__name__)

lowercase__ : Optional[Any] = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = '''mobilenet_v1'''

    def __init__( self : str , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Any=1.0 , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : Optional[int]="relu6" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Tuple=0.999 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : List[Any]=0.001 , **UpperCAmelCase__ : Optional[Any] , ) ->Optional[int]:
        super().__init__(**UpperCAmelCase__ )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = depth_multiplier
        UpperCAmelCase_ = min_depth
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = tf_padding
        UpperCAmelCase_ = classifier_dropout_prob
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = layer_norm_eps


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = version.parse('''1.11''' )

    @property
    def lowerCAmelCase__ ( self : int ) ->Mapping[str, Mapping[int, str]]:
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )

    @property
    def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )

    @property
    def lowerCAmelCase__ ( self : int ) ->float:
        return 1e-4
701
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


lowercase__ : Optional[int] = logging.get_logger(__name__)


def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, Iterable[int]] , _UpperCamelCase : bool , _UpperCamelCase : int ):
    '''simple docstring'''
    def constraint_to_multiple_of(_UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : str=None ):
        UpperCAmelCase_ = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            UpperCAmelCase_ = math.floor(val / multiple ) * multiple
        if x < min_val:
            UpperCAmelCase_ = math.ceil(val / multiple ) * multiple
        return x

    UpperCAmelCase_ = (output_size, output_size) if isinstance(_UpperCamelCase , _UpperCamelCase ) else output_size
    UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(_UpperCamelCase )
    UpperCAmelCase_ , UpperCAmelCase_ = output_size

    # determine new height and width
    UpperCAmelCase_ = output_height / input_height
    UpperCAmelCase_ = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            UpperCAmelCase_ = scale_width
        else:
            # fit height
            UpperCAmelCase_ = scale_height

    UpperCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=_UpperCamelCase )
    UpperCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=_UpperCamelCase )

    return (new_height, new_width)


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = ['''pixel_values''']

    def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : str , ) ->None:
        super().__init__(**UpperCAmelCase__ )
        UpperCAmelCase_ = size if size is not None else {'''height''': 384, '''width''': 384}
        UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
        UpperCAmelCase_ = do_resize
        UpperCAmelCase_ = size
        UpperCAmelCase_ = keep_aspect_ratio
        UpperCAmelCase_ = ensure_multiple_of
        UpperCAmelCase_ = resample
        UpperCAmelCase_ = do_rescale
        UpperCAmelCase_ = rescale_factor
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray:
        UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        UpperCAmelCase_ = get_resize_output_image_size(
            UpperCAmelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , )
        return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Any:
        return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray:
        return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->PIL.Image.Image:
        UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase_ = size if size is not None else self.size
        UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
        UpperCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        UpperCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        UpperCAmelCase_ = resample if resample is not None else self.resample
        UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase_ = image_std if image_std is not None else self.image_std
        UpperCAmelCase_ = make_list_of_images(UpperCAmelCase__ )
        if not valid_images(UpperCAmelCase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase_ = [to_numpy_array(UpperCAmelCase__ ) for image in images]
        if do_resize:
            UpperCAmelCase_ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
        if do_rescale:
            UpperCAmelCase_ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
        if do_normalize:
            UpperCAmelCase_ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
        UpperCAmelCase_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
        UpperCAmelCase_ = {'''pixel_values''': images}
        return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) ->Optional[Any]:
        UpperCAmelCase_ = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(UpperCAmelCase__ ):
                UpperCAmelCase_ = target_sizes.numpy()
            UpperCAmelCase_ = []
            for idx in range(len(UpperCAmelCase__ ) ):
                UpperCAmelCase_ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__ )
                UpperCAmelCase_ = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(UpperCAmelCase__ )
        else:
            UpperCAmelCase_ = logits.argmax(dim=1 )
            UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
43
0
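# Illustrative sketch (not part of the dataset rows above): the DPT-style image processor in the
# previous row snaps the resized height/width to a multiple of `ensure_multiple_of` while roughly
# preserving the aspect ratio. The rounding rule in isolation (function name chosen for the demo):
import math


def constrain_to_multiple_of(val: float, multiple: int, min_val: int = 0) -> int:
    x = round(val / multiple) * multiple
    # Round up if snapping down would fall below the minimum.
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


# e.g. with multiple=32, a target of 470 px snaps up to 480 and 450 px snaps down to 448.
assert constrain_to_multiple_of(470, 32) == 480
assert constrain_to_multiple_of(450, 32) == 448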
'''simple docstring'''
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) ->Optional[Any]:
        UpperCAmelCase_ = jnp.ones((batch_size, length) ) / length
        return scores

    def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
        UpperCAmelCase_ = None
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase__ )
        # tweak scores to not be uniform anymore
        UpperCAmelCase_ = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        UpperCAmelCase_ = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        UpperCAmelCase_ = jax.nn.softmax(UpperCAmelCase__ , axis=-1 )
        UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
        UpperCAmelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )
        UpperCAmelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase__ , scores.copy() , cur_len=UpperCAmelCase__ ) , axis=-1 )
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )

    def lowerCAmelCase__ ( self : List[str] ) ->str:
        UpperCAmelCase_ = None
        UpperCAmelCase_ = 10
        UpperCAmelCase_ = 2
        # create ramp distribution
        UpperCAmelCase_ = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
        UpperCAmelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
        UpperCAmelCase_ = FlaxTopKLogitsWarper(3 )
        UpperCAmelCase_ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        UpperCAmelCase_ = 5
        UpperCAmelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        UpperCAmelCase_ = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, length) ).copy()
        UpperCAmelCase_ = top_k_warp_safety_check(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )

    def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
        UpperCAmelCase_ = None
        UpperCAmelCase_ = 10
        UpperCAmelCase_ = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        UpperCAmelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        UpperCAmelCase_ = FlaxTopPLogitsWarper(0.8 )
        UpperCAmelCase_ = np.exp(top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        UpperCAmelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
        # check edge cases with negative and extreme logits
        UpperCAmelCase_ = np.broadcast_to(np.arange(UpperCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        UpperCAmelCase_ = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        UpperCAmelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        UpperCAmelCase_ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )

    def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__ )
        # check that min length is applied at length 5
        UpperCAmelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
        UpperCAmelCase_ = 5
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
        # check that min length is not applied anymore at length 15
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = 15
        UpperCAmelCase_ = min_dist_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )

    def lowerCAmelCase__ ( self : Any ) ->str:
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
        # check that all scores are -inf except the bos_token_id score
        UpperCAmelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )

    def lowerCAmelCase__ ( self : Tuple ) ->Dict:
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 0
        UpperCAmelCase_ = 5
        UpperCAmelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        UpperCAmelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        UpperCAmelCase_ = 3
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = logits_processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        self.assertFalse(jnp.isinf(UpperCAmelCase__ ).any() )

    def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 10
        UpperCAmelCase_ = 15
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = 15
        # dummy input_ids and scores
        UpperCAmelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
        UpperCAmelCase_ = input_ids.copy()
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = scores.copy()
        # instantiate all dist processors
        UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        UpperCAmelCase_ = FlaxTopKLogitsWarper(3 )
        UpperCAmelCase_ = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        UpperCAmelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__ )
        UpperCAmelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
        UpperCAmelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
        UpperCAmelCase_ = 10
        # no processor list
        UpperCAmelCase_ = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        UpperCAmelCase_ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        UpperCAmelCase_ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        UpperCAmelCase_ = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        UpperCAmelCase_ = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        UpperCAmelCase_ = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        # with processor list
        UpperCAmelCase_ = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        UpperCAmelCase_ = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
        # scores should be equal
        self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )

    def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = 10
        UpperCAmelCase_ = 15
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = 15
        # dummy input_ids and scores
        UpperCAmelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase__ )
        UpperCAmelCase_ = input_ids.copy()
        UpperCAmelCase_ = self._get_uniform_logits(UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = scores.copy()
        # instantiate all dist processors
        UpperCAmelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
        UpperCAmelCase_ = FlaxTopKLogitsWarper(3 )
        UpperCAmelCase_ = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        UpperCAmelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase__ )
        UpperCAmelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase__ )
        UpperCAmelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
        UpperCAmelCase_ = 10

        # no processor list
        def run_no_processor_list(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] ):
            UpperCAmelCase_ = temp_dist_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            UpperCAmelCase_ = top_k_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            UpperCAmelCase_ = top_p_warp(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            UpperCAmelCase_ = min_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            UpperCAmelCase_ = bos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            UpperCAmelCase_ = eos_dist_proc(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            return scores

        # with processor list
        def run_processor_list(UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ):
            UpperCAmelCase_ = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            UpperCAmelCase_ = processor(UpperCAmelCase__ , UpperCAmelCase__ , cur_len=UpperCAmelCase__ )
            return scores

        UpperCAmelCase_ = jax.jit(UpperCAmelCase__ )
        UpperCAmelCase_ = jax.jit(UpperCAmelCase__ )
        UpperCAmelCase_ = jitted_run_no_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = jitted_run_processor_list(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        # scores should be equal
        self.assertTrue(jnp.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
702
'''simple docstring'''
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase__ : int = logging.get_logger(__name__)

lowercase__ : List[str] = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = '''time_series_transformer'''
    lowerCAmelCase__ = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]:
        # time series specific configuration
        UpperCAmelCase_ = prediction_length
        UpperCAmelCase_ = context_length or prediction_length
        UpperCAmelCase_ = distribution_output
        UpperCAmelCase_ = loss
        UpperCAmelCase_ = input_size
        UpperCAmelCase_ = num_time_features
        UpperCAmelCase_ = lags_sequence
        UpperCAmelCase_ = scaling
        UpperCAmelCase_ = num_dynamic_real_features
        UpperCAmelCase_ = num_static_real_features
        UpperCAmelCase_ = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(UpperCAmelCase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            UpperCAmelCase_ = cardinality
        else:
            UpperCAmelCase_ = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(UpperCAmelCase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            UpperCAmelCase_ = embedding_dimension
        else:
            UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        UpperCAmelCase_ = num_parallel_samples

        # Transformer architecture configuration
        UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features
        UpperCAmelCase_ = d_model
        UpperCAmelCase_ = encoder_attention_heads
        UpperCAmelCase_ = decoder_attention_heads
        UpperCAmelCase_ = encoder_ffn_dim
        UpperCAmelCase_ = decoder_ffn_dim
        UpperCAmelCase_ = encoder_layers
        UpperCAmelCase_ = decoder_layers
        UpperCAmelCase_ = dropout
        UpperCAmelCase_ = attention_dropout
        UpperCAmelCase_ = activation_dropout
        UpperCAmelCase_ = encoder_layerdrop
        UpperCAmelCase_ = decoder_layerdrop
        UpperCAmelCase_ = activation_function
        UpperCAmelCase_ = init_std
        UpperCAmelCase_ = use_cache
        super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )

    @property
    def lowerCAmelCase__ ( self : List[str] ) ->int:
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
43
0
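# A minimal, framework-free sketch of the equivalence the jitted test above checks:
# applying score "processors" one by one matches wrapping them in a single
# list-style processor. The processor names and defaults below are illustrative
# assumptions, not the Flax API.
import numpy as np

def temperature_warp(scores, cur_len, temperature=0.5):
    # Scale logits before sampling.
    return scores / temperature

def block_eos_before_min_length(scores, cur_len, min_length=10, eos_token_id=0):
    # Forbid EOS until the sequence is long enough.
    if cur_len < min_length:
        scores = scores.copy()
        scores[:, eos_token_id] = -np.inf
    return scores

def processor_list(scores, cur_len, processors):
    # Apply each processor in order, mirroring what a LogitsProcessorList does.
    for proc in processors:
        scores = proc(scores, cur_len)
    return scores

scores = np.random.randn(2, 20)
manual = block_eos_before_min_length(temperature_warp(scores, 5), 5)
chained = processor_list(scores, 5, [temperature_warp, block_eos_before_min_length])
assert np.allclose(manual, chained)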
'''simple docstring'''

from manim import *


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Any ) ->Dict:
        UpperCAmelCase_ = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )

        UpperCAmelCase_ = [mem.copy() for i in range(6 )]
        UpperCAmelCase_ = [mem.copy() for i in range(6 )]
        UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        UpperCAmelCase_ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        UpperCAmelCase_ = Text('''CPU''' , font_size=24 )
        UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCAmelCase__ )

        UpperCAmelCase_ = [mem.copy() for i in range(1 )]
        UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        UpperCAmelCase_ = Text('''GPU''' , font_size=24 )
        UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
        gpu.align_to(UpperCAmelCase__ , UpperCAmelCase__ )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(UpperCAmelCase__ )

        UpperCAmelCase_ = [mem.copy() for i in range(6 )]
        UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        UpperCAmelCase_ = Text('''Model''' , font_size=24 )
        UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
        model.move_to([3, -1.0, 0] )

        self.play(
            Create(UpperCAmelCase__ , run_time=1 ) ,
            Create(UpperCAmelCase__ , run_time=1 ) ,
            Create(UpperCAmelCase__ , run_time=1 ) ,
        )

        UpperCAmelCase_ = MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" ,
            font_size=24 ,
        )
        UpperCAmelCase_ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )

        UpperCAmelCase_ = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,
            font_size=18 ,
        )

        key_text.move_to([-5, 2.4, 0] )

        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase__ , run_time=2.5 ) , Write(UpperCAmelCase__ ) , Write(UpperCAmelCase__ ) )

        self.add(UpperCAmelCase__ )

        UpperCAmelCase_ = []
        UpperCAmelCase_ = []
        UpperCAmelCase_ = []
        for i, rect in enumerate(UpperCAmelCase__ ):
            UpperCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
            cpu_target.move_to(UpperCAmelCase__ )
            cpu_target.generate_target()
            UpperCAmelCase_ = 0.46 / 4
            UpperCAmelCase_ = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase__ , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase__ , buff=0.0 )
            cpu_targs.append(UpperCAmelCase__ )

            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase__ ) )
            second_animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )

        self.play(*UpperCAmelCase__ )
        self.play(*UpperCAmelCase__ )
        self.wait()
703
'''simple docstring'''

from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig


lowercase__ : Dict = logging.get_logger(__name__)

lowercase__ : List[Any] = "T5Config"


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig
43
0
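# The MT5 wrappers above only override the model_type tag and the config class
# of their T5 parents. A toy sketch of that subclass-override pattern; class
# names here are illustrative, not from the corpus.
class BaseConfig:
    model_type = "t5"


class BaseModel:
    model_type = "t5"
    config_class = BaseConfig

    def __init__(self, config):
        # A real PreTrainedModel validates its config in a comparable way.
        assert isinstance(config, self.config_class)
        self.config = config


class ChildConfig(BaseConfig):
    model_type = "mt5"


class ChildModel(BaseModel):
    # Only the tag and the accepted config class change; behaviour is inherited.
    model_type = "mt5"
    config_class = ChildConfig


model = ChildModel(ChildConfig())
assert model.model_type == "mt5"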
'''simple docstring'''

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeqaSeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    SeqaSeqDataCollator,
    SeqaSeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


lowercase__ : List[Any] = logging.getLogger(__name__)


@dataclass
class lowerCamelCase :
    '''simple docstring'''

    lowerCAmelCase__ = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    lowerCAmelCase__ = field(
        default=lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    lowerCAmelCase__ = field(
        default=lowerCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    lowerCAmelCase__ = field(
        default=lowerCamelCase ,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
    lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )


@dataclass
class lowerCamelCase :
    '''simple docstring'''

    lowerCAmelCase__ = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    lowerCAmelCase__ = field(
        default='''summarization''' ,
        metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
    lowerCAmelCase__ = field(
        default=10_24 ,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    lowerCAmelCase__ = field(
        default=1_28 ,
        metadata={
            '''help''': (
                '''The maximum total sequence length for target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    lowerCAmelCase__ = field(
        default=1_42 ,
        metadata={
            '''help''': (
                '''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded. '''
                '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
                '''during ``evaluate`` and ``predict``.'''
            )
        } , )
    lowerCAmelCase__ = field(
        default=1_42 ,
        metadata={
            '''help''': (
                '''The maximum total sequence length for test target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    lowerCAmelCase__ = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
    lowerCAmelCase__ = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
    lowerCAmelCase__ = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
    lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Source language id for translation.'''} )
    lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Target language id for translation.'''} )
    lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
    lowerCAmelCase__ = field(
        default=lowerCamelCase ,
        metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )


def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Any ):
    '''simple docstring'''
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F""" {key} = {metrics[key]}""" )
    save_json(_UpperCamelCase , os.path.join(_UpperCamelCase , F"""{split}_results.json""" ) )


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_args_into_dataclasses()

    check_output_dir(_UpperCamelCase )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,
        datefmt='''%m/%d/%Y %H:%M:%S''' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,
        training_args.fpaa ,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase_ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir , )

    UpperCAmelCase_ = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
            assert hasattr(_UpperCamelCase , _UpperCamelCase ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )

    UpperCAmelCase_ = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir , )
    UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path ,
        from_tf='''.ckpt''' in model_args.model_name_or_path ,
        config=_UpperCamelCase ,
        cache_dir=model_args.cache_dir , )

    # use task specific params
    use_task_specific_params(_UpperCamelCase , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        UpperCAmelCase_ = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(_UpperCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(_UpperCamelCase , _UpperCamelCase ):
            UpperCAmelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(_UpperCamelCase )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    UpperCAmelCase_ = SeqaSeqDataset

    # Get datasets
    UpperCAmelCase_ = (
        dataset_class(
            _UpperCamelCase ,
            type_path='''train''' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_train ,
            max_target_length=data_args.max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '''''' ,
        )
        if training_args.do_train
        else None
    )
    UpperCAmelCase_ = (
        dataset_class(
            _UpperCamelCase ,
            type_path='''val''' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_val ,
            max_target_length=data_args.val_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '''''' ,
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    UpperCAmelCase_ = (
        dataset_class(
            _UpperCamelCase ,
            type_path='''test''' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_test ,
            max_target_length=data_args.test_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '''''' ,
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    UpperCAmelCase_ = (
        build_compute_metrics_fn(data_args.task , _UpperCamelCase ) if training_args.predict_with_generate else None
    )
    UpperCAmelCase_ = SeqaSeqTrainer(
        model=_UpperCamelCase ,
        args=_UpperCamelCase ,
        data_args=_UpperCamelCase ,
        train_dataset=_UpperCamelCase ,
        eval_dataset=_UpperCamelCase ,
        data_collator=SeqaSeqDataCollator(
            _UpperCamelCase , _UpperCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) ,
        compute_metrics=_UpperCamelCase ,
        tokenizer=_UpperCamelCase ,
    )

    UpperCAmelCase_ = {}
    # Training
    if training_args.do_train:
        logger.info('''*** Train ***''' )
        UpperCAmelCase_ = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        UpperCAmelCase_ = train_result.metrics
        UpperCAmelCase_ = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics('''train''' , _UpperCamelCase , training_args.output_dir )
            all_metrics.update(_UpperCamelCase )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        UpperCAmelCase_ = trainer.evaluate(metric_key_prefix='''val''' )
        UpperCAmelCase_ = data_args.n_val
        UpperCAmelCase_ = round(metrics['''val_loss'''] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics('''val''' , _UpperCamelCase , training_args.output_dir )
            all_metrics.update(_UpperCamelCase )

    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        UpperCAmelCase_ = trainer.predict(test_dataset=_UpperCamelCase , metric_key_prefix='''test''' )
        UpperCAmelCase_ = test_output.metrics
        UpperCAmelCase_ = data_args.n_test

        if trainer.is_world_process_zero():
            UpperCAmelCase_ = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , _UpperCamelCase , training_args.output_dir )
            all_metrics.update(_UpperCamelCase )

            if training_args.predict_with_generate:
                UpperCAmelCase_ = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
                UpperCAmelCase_ = lmap(str.strip , _UpperCamelCase )
                write_txt_file(_UpperCamelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )

    if trainer.is_world_process_zero():
        save_json(_UpperCamelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )

    return all_metrics


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    main()


if __name__ == "__main__":
    main()
704
'''simple docstring'''

import comet  # From: unbabel-comet
import torch

import datasets


lowercase__ : str = datasets.logging.get_logger(__name__)

lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that year's competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : List[Any] ) ->Any:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage='''https://unbabel.github.io/COMET/html/index.html''' ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) ,
            codebase_urls=['''https://github.com/Unbabel/COMET'''] ,
            reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] , )

    def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Union[str, Any] ) ->Any:
        if self.config_name == "default":
            UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=False ) ->Optional[Any]:
        if gpus is None:
            UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
        UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        UpperCAmelCase_ = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for t in zip(*data.values() )]
        UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(UpperCAmelCase__ , gpus=UpperCAmelCase__ , progress_bar=UpperCAmelCase__ )
        return {"mean_score": mean_score, "scores": scores}
43
0
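# Usage sketch for the COMET metric wrapper above, following its own docstring;
# running this requires the unbabel-comet package and downloads a model checkpoint.
import datasets

comet_metric = datasets.load_metric("comet")
source = ["Dem Feuer konnte Einhalt geboten werden"]
hypothesis = ["The fire could be stopped"]
reference = ["They were able to control the fire."]
results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
print(results["scores"])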
'''simple docstring'''

import numpy as np
import torch
import tqdm

from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    def __init__( self : Tuple , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : UNetaDModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : str , ) ->Optional[int]:
        super().__init__()
        UpperCAmelCase_ = value_function
        UpperCAmelCase_ = unet
        UpperCAmelCase_ = scheduler
        UpperCAmelCase_ = env
        UpperCAmelCase_ = env.get_dataset()
        UpperCAmelCase_ = {}
        for key in self.data.keys():
            try:
                UpperCAmelCase_ = self.data[key].mean()
            except:  # noqa: E722
                pass
        UpperCAmelCase_ = {}
        for key in self.data.keys():
            try:
                UpperCAmelCase_ = self.data[key].std()
            except:  # noqa: E722
                pass
        UpperCAmelCase_ = env.observation_space.shape[0]
        UpperCAmelCase_ = env.action_space.shape[0]

    def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] ) ->List[Any]:
        return (x_in - self.means[key]) / self.stds[key]

    def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->List[str]:
        return x_in * self.stds[key] + self.means[key]

    def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : Optional[Any] ) ->Dict:
        if type(UpperCAmelCase__ ) is dict:
            return {k: self.to_torch(UpperCAmelCase__ ) for k, v in x_in.items()}
        elif torch.is_tensor(UpperCAmelCase__ ):
            return x_in.to(self.unet.device )
        return torch.tensor(UpperCAmelCase__ , device=self.unet.device )

    def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] ) ->Optional[Any]:
        for key, val in cond.items():
            UpperCAmelCase_ = val.clone()
        return x_in

    def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->List[str]:
        UpperCAmelCase_ = x.shape[0]
        UpperCAmelCase_ = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            UpperCAmelCase_ = torch.full((batch_size,) , UpperCAmelCase__ , device=self.unet.device , dtype=torch.long )
            for _ in range(UpperCAmelCase__ ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    UpperCAmelCase_ = self.value_function(x.permute(0 , 2 , 1 ) , UpperCAmelCase__ ).sample
                    UpperCAmelCase_ = torch.autograd.grad([y.sum()] , [x] )[0]

                    UpperCAmelCase_ = self.scheduler._get_variance(UpperCAmelCase__ )
                    UpperCAmelCase_ = torch.exp(0.5 * posterior_variance )
                    UpperCAmelCase_ = model_std * grad

                UpperCAmelCase_ = 0
                UpperCAmelCase_ = x.detach()
                UpperCAmelCase_ = x + scale * grad
                UpperCAmelCase_ = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim )
            UpperCAmelCase_ = self.unet(x.permute(0 , 2 , 1 ) , UpperCAmelCase__ ).sample.permute(0 , 2 , 1 )

            # TODO: verify deprecation of this kwarg
            UpperCAmelCase_ = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , predict_epsilon=UpperCAmelCase__ )['''prev_sample''']

            # apply conditions to the trajectory (set the initial state)
            UpperCAmelCase_ = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim )
            UpperCAmelCase_ = self.to_torch(UpperCAmelCase__ )
        return x, y

    def __call__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any]=64 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Optional[int]=0.1 ) ->Optional[Any]:
        # normalize the observations and create batch dimension
        UpperCAmelCase_ = self.normalize(UpperCAmelCase__ , '''observations''' )
        UpperCAmelCase_ = obs[None].repeat(UpperCAmelCase__ , axis=0 )

        UpperCAmelCase_ = {0: self.to_torch(UpperCAmelCase__ )}
        UpperCAmelCase_ = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        UpperCAmelCase_ = randn_tensor(UpperCAmelCase__ , device=self.unet.device )
        UpperCAmelCase_ = self.reset_xa(UpperCAmelCase__ , UpperCAmelCase__ , self.action_dim )
        UpperCAmelCase_ = self.to_torch(UpperCAmelCase__ )

        # run the diffusion process
        UpperCAmelCase_ , UpperCAmelCase_ = self.run_diffusion(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

        # sort output trajectories by value
        UpperCAmelCase_ = y.argsort(0 , descending=UpperCAmelCase__ ).squeeze()
        UpperCAmelCase_ = x[sorted_idx]
        UpperCAmelCase_ = sorted_values[:, :, : self.action_dim]
        UpperCAmelCase_ = actions.detach().cpu().numpy()
        UpperCAmelCase_ = self.de_normalize(UpperCAmelCase__ , key='''actions''' )

        # select the action with the highest value
        if y is not None:
            UpperCAmelCase_ = 0
        else:
            # if we didn't run value guiding, select a random action
            UpperCAmelCase_ = np.random.randint(0 , UpperCAmelCase__ )

        UpperCAmelCase_ = denorm_actions[selected_index, 0]
        return denorm_actions
705
'''simple docstring'''

import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Union[str, Any]=400 , UpperCAmelCase__ : List[Any]=3 , ) ->Dict:
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = do_resize
        UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
        UpperCAmelCase_ = size_divisor
        UpperCAmelCase_ = do_rescale
        UpperCAmelCase_ = rescale_factor
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = do_center_crop
        UpperCAmelCase_ = image_mean
        UpperCAmelCase_ = image_std
        UpperCAmelCase_ = do_pad
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = min_resolution
        UpperCAmelCase_ = max_resolution

    def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=False ) ->Any:
        if not batched:
            UpperCAmelCase_ = self.size['''shortest_edge''']
            UpperCAmelCase_ = image_inputs[0]
            if isinstance(UpperCAmelCase__ , Image.Image ):
                UpperCAmelCase_ , UpperCAmelCase_ = image.size
            else:
                UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
            UpperCAmelCase_ = size / min(UpperCAmelCase__ , UpperCAmelCase__ )
            if h < w:
                UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
            else:
                UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size

            UpperCAmelCase_ = int((1333 / 800) * size )
            if max(UpperCAmelCase__ , UpperCAmelCase__ ) > max_size:
                UpperCAmelCase_ = max_size / max(UpperCAmelCase__ , UpperCAmelCase__ )
                UpperCAmelCase_ = newh * scale
                UpperCAmelCase_ = neww * scale
                UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 )

            UpperCAmelCase_ , UpperCAmelCase_ = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            UpperCAmelCase_ = []
            for image in image_inputs:
                UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
            UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase__ = BridgeTowerImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self : Optional[int] ) ->str:
        UpperCAmelCase_ = BridgeTowerImageProcessingTester(self )

    @property
    def lowerCAmelCase__ ( self : List[str] ) ->Dict:
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self : Optional[int] ) ->int:
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size_divisor''' ) )

    def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
        pass

    def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
        # Initialize image processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )

        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
        # Initialize image processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )

        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase__ ( self : int ) ->List[str]:
        # Initialize image processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )

        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
43
0
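# A framework-free sketch of the per-key normalize / de_normalize pair defined by
# the value-guided pipeline above: standardise with stored means and stds, then
# invert. The sample statistics here are made up for illustration.
import numpy as np

means = {"observations": 1.5}
stds = {"observations": 0.5}

def normalize(x, key):
    # (x - mean) / std, per dataset key.
    return (x - means[key]) / stds[key]

def de_normalize(x, key):
    # Exact inverse of normalize.
    return x * stds[key] + means[key]

obs = np.array([1.0, 2.0, 3.0])
assert np.allclose(de_normalize(normalize(obs, "observations"), "observations"), obs)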
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


lowercase__ : List[str] = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : Optional[Any] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : Tuple = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : Union[str, Any] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : str = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]


if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    lowercase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
'''simple docstring'''

from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSpeechSeqaSeq,
        TFAutoModelForVisionaSeq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Dict ) ->List[str]:
        UpperCAmelCase_ = tf.convert_to_tensor(
            [
                [
                    8.222_0991,  # 3rd highest value; idx. 0
                    -0.562_0044,
                    5.2322_9752,
                    4.038_6393,
                    -6.879_8378,
                    -0.5478_5802,
                    -3.201_2153,
                    2.9277_7176,
                    1.8817_1953,
                    7.3534_1276,  # 5th highest value; idx. 9
                    8.4320_7833,  # 2nd highest value; idx. 10
                    -9.8571_1836,
                    -5.9620_9236,
                    -1.1303_9161,
                    -7.111_5294,
                    -0.836_9633,
                    -5.318_6408,
                    7.0642_7407,
                    0.8136_9344,
                    -0.8202_3817,
                    -5.917_9796,
                    0.5881_3443,
                    -6.9977_8438,
                    4.7155_1189,
                    -0.1877_1637,
                    7.4402_0759,  # 4th highest value; idx. 25
                    9.3845_0987,  # 1st highest value; idx. 26
                    2.1266_2941,
                    -9.3256_2038,
                    2.3565_2522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.5842_5518,
                    4.5313_9238,
                    -5.5751_0464,
                    -6.2803_0699,
                    -7.1952_9503,
                    -4.0212_2551,
                    1.3933_7037,
                    -6.0670_7057,
                    1.5948_0517,
                    -9.64_3119,
                    0.0390_7799,
                    0.6723_1762,
                    -8.8820_6726,
                    6.2711_5922,  # 4th highest value; idx. 13
                    2.2852_0723,
                    4.8276_7506,
                    4.3042_1368,
                    8.827_5313,  # 2nd highest value; idx. 17
                    5.4402_9958,  # 5th highest value; idx. 18
                    -4.473_5794,
                    7.3857_9536,  # 3rd highest value; idx. 20
                    -2.9105_1663,
                    2.6194_6077,
                    -2.567_4762,
                    -9.4895_9302,
                    -4.0292_2645,
                    -1.3541_6918,
                    9.6770_2323,  # 1st highest value; idx. 27
                    -5.8947_8553,
                    1.8537_0467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ] ,
            dtype=tf.floataa , )

        UpperCAmelCase_ = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] ,
            dtype=tf.intaa , )  # expected non filtered idx as noted above

        UpperCAmelCase_ = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] ,
            dtype=tf.floataa , )  # expected non filtered values as noted above

        UpperCAmelCase_ = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )

        UpperCAmelCase_ = output[output != -float('''inf''' )]
        UpperCAmelCase_ = tf.cast(
            tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) ,
            dtype=tf.intaa , )

        tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 )
        tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )


@require_tf
class lowerCamelCase ( unittest.TestCase , lowerCamelCase ):
    '''simple docstring'''

    if is_tf_available():
        lowerCAmelCase__ = {
            '''AutoModelForCausalLM''': TFAutoModelForCausalLM,
            '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
            '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
            '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
            '''LogitsProcessorList''': TFLogitsProcessorList,
            '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
            '''create_tensor_fn''': tf.convert_to_tensor,
            '''floats_tensor''': floats_tensor,
            '''return_tensors''': '''tf''',
        }

    @slow
    def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
        # TF-only test: tf.saved_model export
        UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = 2

        class lowerCamelCase ( tf.Module ):
            '''simple docstring'''

            def __init__( self : List[str] , UpperCAmelCase__ : List[str] ) ->Dict:
                super(UpperCAmelCase__ , self ).__init__()
                UpperCAmelCase_ = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
                ) ,
                jit_compile=UpperCAmelCase__ , )
            def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->int:
                UpperCAmelCase_ = self.model.generate(
                    input_ids=UpperCAmelCase__ ,
                    attention_mask=UpperCAmelCase__ ,
                    max_new_tokens=UpperCAmelCase__ ,
                    return_dict_in_generate=UpperCAmelCase__ , )
                return {"sequences": outputs["sequences"]}

        UpperCAmelCase_ = [[2, 0], [102, 103]]
        UpperCAmelCase_ = [[1, 0], [1, 1]]
        UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
            UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
            for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ):
                UpperCAmelCase_ = {
                    '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
                    '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
                UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
                tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
        # TF-only test: tf.saved_model export
        UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = 2

        class lowerCamelCase ( tf.Module ):
            '''simple docstring'''

            def __init__( self : int , UpperCAmelCase__ : int ) ->List[str]:
                super(UpperCAmelCase__ , self ).__init__()
                UpperCAmelCase_ = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
                ) ,
                jit_compile=UpperCAmelCase__ , )
            def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->int:
                UpperCAmelCase_ = self.model.generate(
                    input_ids=UpperCAmelCase__ ,
                    attention_mask=UpperCAmelCase__ ,
                    max_new_tokens=UpperCAmelCase__ ,
                    return_dict_in_generate=UpperCAmelCase__ , )
                return {"sequences": outputs["sequences"]}

        UpperCAmelCase_ = [[2], [102, 103]]
        UpperCAmelCase_ = [[1], [1, 1]]
        UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
            UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
            for input_row in range(len(UpperCAmelCase__ ) ):
                UpperCAmelCase_ = {
                    '''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
                    '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
                }
                UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
                UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
                tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    @require_tensorflow_text
    def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase__ )

            class lowerCamelCase ( tf.keras.layers.Layer ):
                '''simple docstring'''

                def __init__( self : List[str] ) ->Any:
                    super().__init__()
                    UpperCAmelCase_ = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , '''spiece.model''' ) , '''rb''' ).read() )
                    UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )

                def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]:
                    UpperCAmelCase_ = self.tokenizer.tokenize(UpperCAmelCase__ )
                    UpperCAmelCase_ , UpperCAmelCase_ = text.pad_model_inputs(
                        UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    UpperCAmelCase_ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
                    return self.tokenizer.detokenize(UpperCAmelCase__ )

            UpperCAmelCase_ = CompleteSentenceTransformer()
            UpperCAmelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
            UpperCAmelCase_ = complete_model(UpperCAmelCase__ )
            UpperCAmelCase_ = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
            keras_model.save(UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
        # Has PT equivalent: this test relies on random sampling
        UpperCAmelCase_ = {
            '''do_sample''': True,
            '''num_beams''': 1,
            '''top_p''': 0.7,
            '''top_k''': 10,
            '''temperature''': 0.7,
        }
        UpperCAmelCase_ = 14
        UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = '''Hello, my dog is cute and'''
        UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' )
        UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = 638

        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

        UpperCAmelCase_ = [638, 198]
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

    def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
        # Has PT equivalent: ample use of framework-specific code
        UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        UpperCAmelCase_ = '''Hugging Face is a technology company based in New York and Paris.'''
        UpperCAmelCase_ = bart_tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ).input_ids
        UpperCAmelCase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()

        class lowerCamelCase ( lowerCamelCase ):
            '''simple docstring'''

            def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) ->List[str]:
                return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )

        UpperCAmelCase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ , foo='''bar''' ).numpy()
        self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )

        class lowerCamelCase ( bart_model.model.encoder.__class__ ):
            '''simple docstring'''

            def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->Any:
                return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )

        UpperCAmelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared )
        UpperCAmelCase_ = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()
        with self.assertRaises(UpperCAmelCase__ ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(UpperCAmelCase__ , foo='''bar''' )
43
0
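# A simplified NumPy sketch of the top-k / top-p filtering that the
# tf_top_k_top_p_filtering test above exercises: keep the k best logits plus the
# smallest prefix of the sorted distribution whose mass stays within top_p, and
# mask everything else with -inf. This re-derivation ignores min_tokens_to_keep
# and is not the library implementation.
import numpy as np

def top_k_top_p_filter(logits, top_k=10, top_p=0.6):
    logits = logits.copy()
    if top_k > 0:
        # Keep only values at least as large as the k-th largest per row.
        kth_best = np.sort(logits)[..., -top_k][..., None]
        logits[logits < kth_best] = -np.inf
    if top_p < 1.0:
        order = np.argsort(logits)[..., ::-1]  # descending
        sorted_logits = np.take_along_axis(logits, order, axis=-1)
        probs = np.exp(sorted_logits - sorted_logits.max(-1, keepdims=True))
        probs /= probs.sum(-1, keepdims=True)
        cumulative = probs.cumsum(-1)
        # Shifted threshold so the token crossing top_p is still kept.
        remove = cumulative - probs > top_p
        sorted_logits[remove] = -np.inf
        np.put_along_axis(logits, order, sorted_logits, axis=-1)
    return logits

filtered = top_k_top_p_filter(np.random.randn(2, 30))
print(np.isfinite(filtered).sum(axis=-1))  # tokens surviving per row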
'''simple docstring'''

import math

from numpy import inf
from scipy.integrate import quad


def __lowerCamelCase ( _UpperCamelCase : float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''' )

    return quad(_UpperCamelCase , 0 , _UpperCamelCase , args=(_UpperCamelCase) )[0]


def __lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float ):
    '''simple docstring'''
    return math.pow(_UpperCamelCase , z - 1 ) * math.exp(-x )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
707
'''simple docstring'''

from collections.abc import Callable


def __lowerCamelCase ( _UpperCamelCase : Callable[[float], float] , _UpperCamelCase : float , _UpperCamelCase : float ):
    '''simple docstring'''
    UpperCAmelCase_ = a
    UpperCAmelCase_ = b
    if function(_UpperCamelCase ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(_UpperCamelCase ) == 0:
        return b
    elif (
        function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''' )
    else:
        UpperCAmelCase_ = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(_UpperCamelCase ) == 0:
                return mid
            elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
                UpperCAmelCase_ = mid
            else:
                UpperCAmelCase_ = mid
            UpperCAmelCase_ = start + (end - start) / 2.0
        return mid


def __lowerCamelCase ( _UpperCamelCase : float ):
    '''simple docstring'''
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
43
0
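# A worked check of the bisection idea used above, on its own test function
# f(x) = x**3 - 2*x - 5, whose real root is approximately 2.0945515. The loop
# below mirrors the interval-halving logic in a self-contained form.
def f(x):
    return x**3 - 2 * x - 5

start, end = 1.0, 1000.0  # f(start) < 0 < f(end), so a root lies between them
while end - start > 1e-7:
    mid = (start + end) / 2.0
    if f(start) * f(mid) <= 0:
        end = mid  # root is in the left half
    else:
        start = mid  # root is in the right half
print(round((start + end) / 2.0, 6))  # -> 2.094551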
'''simple docstring'''

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Union[str, Any]=False ):
    '''simple docstring'''
    UpperCAmelCase_ = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append(
            (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''module.cls_token''', '''vit.embeddings.cls_token'''),
            ('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''module.norm.weight''', '''layernorm.weight'''),
                ('''module.norm.bias''', '''layernorm.bias'''),
            ] )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        UpperCAmelCase_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )

    return rename_keys


def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            UpperCAmelCase_ = ''''''
        else:
            UpperCAmelCase_ = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        UpperCAmelCase_ = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
        UpperCAmelCase_ = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        UpperCAmelCase_ = in_proj_weight[
            : config.hidden_size, :
        ]
        UpperCAmelCase_ = in_proj_bias[: config.hidden_size]
        UpperCAmelCase_ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        UpperCAmelCase_ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        UpperCAmelCase_ = in_proj_weight[
            -config.hidden_size :, :
        ]
        UpperCAmelCase_ = in_proj_bias[-config.hidden_size :]


def __lowerCamelCase ( _UpperCamelCase : List[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(_UpperCamelCase , _UpperCamelCase )


def __lowerCamelCase ( _UpperCamelCase : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ = [
        '''module.fc.fc1.weight''',
        '''module.fc.fc1.bias''',
        '''module.fc.bn1.weight''',
        '''module.fc.bn1.bias''',
        '''module.fc.bn1.running_mean''',
        '''module.fc.bn1.running_var''',
        '''module.fc.bn1.num_batches_tracked''',
        '''module.fc.fc2.weight''',
        '''module.fc.fc2.bias''',
        '''module.fc.bn2.weight''',
        '''module.fc.bn2.bias''',
        '''module.fc.bn2.running_mean''',
        '''module.fc.bn2.running_var''',
        '''module.fc.bn2.num_batches_tracked''',
        '''module.fc.fc3.weight''',
        '''module.fc.fc3.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(_UpperCamelCase , _UpperCamelCase )


def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple ):
    '''simple docstring'''
    UpperCAmelCase_ = dct.pop(_UpperCamelCase )
    UpperCAmelCase_ = val


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ = ViTMSNConfig()
    UpperCAmelCase_ = 1000

    UpperCAmelCase_ = '''datasets/huggingface/label-files'''
    UpperCAmelCase_ = '''imagenet-1k-id2label.json'''
    UpperCAmelCase_ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase ) , '''r''' ) )
    UpperCAmelCase_ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
    UpperCAmelCase_ = idalabel
    UpperCAmelCase_ = {v: k for k, v in idalabel.items()}

    if "s16" in checkpoint_url:
        UpperCAmelCase_ = 384
        UpperCAmelCase_ = 1536
        UpperCAmelCase_ = 6
    elif "l16" in checkpoint_url:
        UpperCAmelCase_ = 1024
        UpperCAmelCase_ = 4096
        UpperCAmelCase_ = 24
        UpperCAmelCase_ = 16
        UpperCAmelCase_ = 0.1
    elif "b4" in checkpoint_url:
        UpperCAmelCase_ = 4
    elif "l7" in checkpoint_url:
        UpperCAmelCase_ = 7
        UpperCAmelCase_ = 1024
        UpperCAmelCase_ = 4096
        UpperCAmelCase_ = 24
        UpperCAmelCase_ = 16
        UpperCAmelCase_ = 0.1

    UpperCAmelCase_ = ViTMSNModel(_UpperCamelCase )

    UpperCAmelCase_ = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='''cpu''' )['''target_encoder''']

    UpperCAmelCase_ = ViTImageProcessor(size=config.image_size )

    remove_projection_head(_UpperCamelCase )
    UpperCAmelCase_ = create_rename_keys(_UpperCamelCase , base_model=_UpperCamelCase )

    for src, dest in rename_keys:
        rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , base_model=_UpperCamelCase )

    model.load_state_dict(_UpperCamelCase )
    model.eval()

    UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    UpperCAmelCase_ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
    UpperCAmelCase_ = ViTImageProcessor(
        size=config.image_size , image_mean=_UpperCamelCase , image_std=_UpperCamelCase )
    UpperCAmelCase_ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' )

    # forward pass
    torch.manual_seed(2 )
    UpperCAmelCase_ = model(**_UpperCamelCase )
    UpperCAmelCase_ = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        UpperCAmelCase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
    elif "b16" in checkpoint_url:
        UpperCAmelCase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
    elif "l16" in checkpoint_url:
        UpperCAmelCase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
    elif "b4" in checkpoint_url:
        UpperCAmelCase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
    else:
        UpperCAmelCase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , _UpperCamelCase , atol=1E-4 )

    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(_UpperCamelCase )

    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(_UpperCamelCase )


if __name__ == "__main__":
    lowercase__ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    lowercase__ : str = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
708
'''simple docstring'''
import re


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , _UpperCamelCase )]


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    UpperCAmelCase_ = split_input(_UpperCamelCase )
    return "".join(
        [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool , _UpperCamelCase : str ):
    '''simple docstring'''
    try:
        UpperCAmelCase_ = split_input(_UpperCamelCase )
        if upper:
            UpperCAmelCase_ = ''''''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            UpperCAmelCase_ = ''''''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    return to_simple_case(_UpperCamelCase )


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    try:
        UpperCAmelCase_ = to_simple_case(_UpperCamelCase )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
    '''simple docstring'''
    return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''_''' )


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
    '''simple docstring'''
    return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''-''' )


if __name__ == "__main__":
    __import__("doctest").testmod()
43
0
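# Note on the conversion script above: timm-style ViT checkpoints store query, key and
# value as one fused `qkv` projection, and the read_in_q_k_v step recovers the three
# pieces by slicing. A minimal, self-contained sketch of that slicing (hypothetical
# hidden size, random tensors instead of a real checkpoint):
import torch

hidden_size = 4  # hypothetical; real ViT-MSN variants use 384/768/1024
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused projection matrix
qkv_bias = torch.randn(3 * hidden_size)

# query, key and value occupy consecutive blocks of rows, in that order
q_w = qkv_weight[:hidden_size, :]
k_w = qkv_weight[hidden_size : hidden_size * 2, :]
v_w = qkv_weight[-hidden_size:, :]
q_b, k_b, v_b = qkv_bias[:hidden_size], qkv_bias[hidden_size : hidden_size * 2], qkv_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)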
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase__ : str = logging.get_logger(__name__)

lowercase__ : Tuple = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''vit_msn'''

    def __init__( self : List[Any] , UpperCAmelCase__ : str=768 , UpperCAmelCase__ : Any=12 , UpperCAmelCase__ : str=12 , UpperCAmelCase__ : int=3072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : Union[str, Any]=1e-06 , UpperCAmelCase__ : str=224 , UpperCAmelCase__ : Optional[int]=16 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : str=True , **UpperCAmelCase__ : int , ) ->str:
        super().__init__(**UpperCAmelCase__ )

        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = layer_norm_eps
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = patch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = qkv_bias
709
'''simple docstring'''
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    lowercase__ : Optional[Any] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , ):
    '''simple docstring'''
    if attention_mask is None:
        UpperCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        UpperCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        UpperCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class lowerCamelCase :
    '''simple docstring'''

    def __init__( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Optional[int]:
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = seq_length
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = eos_token_id
        UpperCAmelCase_ = pad_token_id
        UpperCAmelCase_ = bos_token_id
        UpperCAmelCase_ = initializer_range

    def lowerCAmelCase__ ( self : int ) ->Any:
        UpperCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )

        UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )

        UpperCAmelCase_ = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , )
        UpperCAmelCase_ = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, inputs_dict

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
        UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) ->Tuple:
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )

        UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )

        UpperCAmelCase_ , UpperCAmelCase_ = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )

        UpperCAmelCase_ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )

        UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )

        UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->Union[str, Any]:
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )

        UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )

        UpperCAmelCase_ , UpperCAmelCase_ = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        UpperCAmelCase_ = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )

        UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
        UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )

        UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )

        UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )


@require_flax
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase__ = 99

    def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
        UpperCAmelCase_ = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )

        UpperCAmelCase_ = input_ids.shape[0]
        UpperCAmelCase_ = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def lowerCAmelCase__ ( self : Any ) ->str:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_config_and_data()
        UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
        UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ )
        UpperCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : str ) ->int:
        UpperCAmelCase_ = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
        UpperCAmelCase_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        UpperCAmelCase_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
        UpperCAmelCase_ = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
        UpperCAmelCase_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
        UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
        UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )


@require_flax
class lowerCamelCase ( lowerCamelCase , unittest.TestCase , lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = True
    lowerCAmelCase__ = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
        UpperCAmelCase_ = FlaxBlenderbotModelTester(self )

    def lowerCAmelCase__ ( self : str ) ->Tuple:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Tuple ) ->str:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Dict ) ->Tuple:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
                UpperCAmelCase_ = model_class(UpperCAmelCase__ )

                @jax.jit
                def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] ):
                    return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()

                self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
                for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def lowerCAmelCase__ ( self : str ) ->str:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ = model_class(UpperCAmelCase__ )
                UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )

                UpperCAmelCase_ = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
                    return model.decode(
                        decoder_input_ids=UpperCAmelCase__ ,
                        decoder_attention_mask=UpperCAmelCase__ ,
                        encoder_outputs=UpperCAmelCase__ ,
                    )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()

                self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
                for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def lowerCAmelCase__ ( self : int ) ->int:
        for model_class_name in self.all_model_classes:
            UpperCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase_ = model(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )

    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
        UpperCAmelCase_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        UpperCAmelCase_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}

        UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCAmelCase__ )
        UpperCAmelCase_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )

        UpperCAmelCase_ = ['''Sam''']
        UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''jax''' )

        UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
        UpperCAmelCase_ = '''Sam is a great name. It means "sun" in Gaelic.'''

        UpperCAmelCase_ = tokenizer.batch_decode(UpperCAmelCase__ , **UpperCAmelCase__ )
        assert generated_txt[0].strip() == tgt_text
43
0
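# The Blenderbot test above asserts properties of `shift_tokens_right(input_ids, 1, 2)`:
# every row gains the decoder start token at position 0 and drops its last token. A
# standalone numpy sketch of that behaviour (not the library implementation):
import numpy as np


def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # move everything one slot to the right
    shifted[:, 0] = decoder_start_token_id  # prepend the start token
    return np.where(shifted == -100, pad_token_id, shifted)  # label-mask slots become pad


ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_tokens_right_sketch(ids, pad_token_id=1, decoder_start_token_id=2))
# [[ 2 71 82 18 33  2  1]] -- one fewer trailing pad token, start token in front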
'''simple docstring'''
from __future__ import annotations


def __lowerCamelCase ( _UpperCamelCase : list[int] ):
    '''simple docstring'''
    return len(set(_UpperCamelCase ) ) == len(_UpperCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
710
'''simple docstring'''
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


lowercase__ : Tuple = pytest.mark.integration


@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ):
    '''simple docstring'''
    inspect_dataset(_UpperCamelCase , _UpperCamelCase )
    UpperCAmelCase_ = path + '''.py'''
    assert script_name in os.listdir(_UpperCamelCase )
    assert "__pycache__" not in os.listdir(_UpperCamelCase )


@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ):
    '''simple docstring'''
    inspect_metric(_UpperCamelCase , _UpperCamelCase )
    UpperCAmelCase_ = path + '''.py'''
    assert script_name in os.listdir(_UpperCamelCase )
    assert "__pycache__" not in os.listdir(_UpperCamelCase )


@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' ,
    [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' ,
    [
        ('''paws''', None, ValueError),
    ] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    with pytest.raises(_UpperCamelCase ):
        get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )


@pytest.mark.parametrize(
    '''path, expected''' ,
    [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ = get_dataset_config_names(_UpperCamelCase )
    assert expected in config_names


@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' ,
    [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] , )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase )
    assert list(infos.keys() ) == expected_configs
    UpperCAmelCase_ = expected_configs[0]
    assert expected_config in infos
    UpperCAmelCase_ = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config


@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' ,
    [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : str ):
    '''simple docstring'''
    UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase )
    assert expected_config in infos
    UpperCAmelCase_ = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' ,
    [
        ('''paws''', None, ValueError),
    ] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ):
    '''simple docstring'''
    with pytest.raises(_UpperCamelCase ):
        get_dataset_split_names(_UpperCamelCase , config_name=_UpperCamelCase )
43
0
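# The one-line duplicate check in the record above works because a set collapses
# repeated values, so the lengths only match when every element is unique.
# Illustrative calls, with the function given a hypothetical readable name:
def all_unique(values: list[int]) -> bool:
    return len(set(values)) == len(values)


assert all_unique([1, 2, 3, 4]) is True
assert all_unique([1, 2, 2, 4]) is False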
'''simple docstring'''
import math


def __lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float ):
    '''simple docstring'''
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(_UpperCamelCase ) ) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
711
'''simple docstring'''
import collections
import os
import re
from pathlib import Path


lowercase__ : List[Any] = "src/transformers"


# Matches is_xxx_available()
lowercase__ : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase__ : Any = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase__ : Union[str, Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase__ : List[str] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase__ : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase__ : List[Any] = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase__ : Optional[Any] = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase__ : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase__ : int = re.compile(R"^\s*try:")
# Catches a line with else:
lowercase__ : Any = re.compile(R"^\s*else:")


def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ):
    '''simple docstring'''
    if _re_test_backend.search(_UpperCamelCase ) is None:
        return None
    UpperCAmelCase_ = [b[0] for b in _re_backend.findall(_UpperCamelCase )]
    backends.sort()
    return "_and_".join(_UpperCamelCase )


def __lowerCamelCase ( _UpperCamelCase : int ):
    '''simple docstring'''
    with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        UpperCAmelCase_ = f.readlines()

    UpperCAmelCase_ = 0
    while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(_UpperCamelCase ):
        return None

    # First grab the objects without a specific backend in _import_structure
    UpperCAmelCase_ = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        UpperCAmelCase_ = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(_UpperCamelCase ):
            UpperCAmelCase_ = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0]
            UpperCAmelCase_ = re.findall(R'''\[([^\]]+)\]''' , _UpperCamelCase )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        UpperCAmelCase_ = _re_import_struct_key_value.search(_UpperCamelCase )
        if single_line_import_search is not None:
            UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_UpperCamelCase ) > 0]
            objects.extend(_UpperCamelCase )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1

    UpperCAmelCase_ = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        UpperCAmelCase_ = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCAmelCase_ = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            UpperCAmelCase_ = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                UpperCAmelCase_ = lines[line_index]
                if _re_import_struct_add_one.search(_UpperCamelCase ) is not None:
                    objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] )
                elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None:
                    UpperCAmelCase_ = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(''', ''' )
                    UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
                    objects.extend(_UpperCamelCase )
                elif _re_between_brackets.search(_UpperCamelCase ) is not None:
                    UpperCAmelCase_ = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(''', ''' )
                    UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
                    objects.extend(_UpperCamelCase )
                elif _re_quote_object.search(_UpperCamelCase ) is not None:
                    objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1

            UpperCAmelCase_ = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    UpperCAmelCase_ = []
    while (
        line_index < len(_UpperCamelCase )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        UpperCAmelCase_ = lines[line_index]
        UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1

    UpperCAmelCase_ = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(_UpperCamelCase ):
        # If the line is an if is_backend_available, we grab all objects associated.
        UpperCAmelCase_ = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            UpperCAmelCase_ = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1

            line_index += 1

            UpperCAmelCase_ = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                UpperCAmelCase_ = lines[line_index]
                UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1

            UpperCAmelCase_ = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ):
    '''simple docstring'''

    def find_duplicates(_UpperCamelCase : Tuple ):
        return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]

    UpperCAmelCase_ = []
    for key in import_dict_objects.keys():
        UpperCAmelCase_ = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
        UpperCAmelCase_ = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )

        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            UpperCAmelCase_ = '''base imports''' if key == '''none''' else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"""  {a} in TYPE_HINT but not in _import_structure.""" )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"""  {a} in _import_structure but not in TYPE_HINT.""" )
    return errors


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = []
    for root, _, files in os.walk(_UpperCamelCase ):
        if "__init__.py" in files:
            UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''__init__.py''' )
            UpperCAmelCase_ = parse_init(_UpperCamelCase )
            if objects is not None:
                UpperCAmelCase_ = analyze_results(*_UpperCamelCase )
                if len(_UpperCamelCase ) > 0:
                    UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('''\n'''.join(_UpperCamelCase ) )
    if len(_UpperCamelCase ) > 0:
        raise ValueError('''\n\n'''.join(_UpperCamelCase ) )


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = []
    for path, directories, files in os.walk(_UpperCamelCase ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(_UpperCamelCase )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(_UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            UpperCAmelCase_ = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) )
            UpperCAmelCase_ = short_path.replace(os.path.sep , '''.''' )
            submodules.append(_UpperCamelCase )
        for fname in files:
            if fname == "__init__.py":
                continue
            UpperCAmelCase_ = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) )
            UpperCAmelCase_ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(_UpperCamelCase )
    return submodules


lowercase__ : Union[str, Any] = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def __lowerCamelCase ( ):
    '''simple docstring'''
    from transformers.utils import direct_transformers_import

    UpperCAmelCase_ = direct_transformers_import(_UpperCamelCase )

    UpperCAmelCase_ = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(_UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f:
        UpperCAmelCase_ = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _UpperCamelCase ) ) )

    UpperCAmelCase_ = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(_UpperCamelCase ) > 0:
        UpperCAmelCase_ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
43
0
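# A quick numerical check of the Malus's-law routine in the record above: a polarizer
# rotated 60 degrees from the polarization axis passes cos^2(60 deg) = 0.25 of the
# incident intensity. Sketch, with an arbitrary initial intensity:
import math

initial_intensity = 100.0  # arbitrary units
angle = 60.0  # degrees
transmitted = initial_intensity * math.cos(math.radians(angle)) ** 2
print(round(transmitted, 6))  # 25.0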
'''simple docstring'''


def __lowerCamelCase ( _UpperCamelCase : int = 10 ):
    '''simple docstring'''
    if not isinstance(_UpperCamelCase , _UpperCamelCase ) or n < 0:
        raise ValueError('''Invalid input''' )
    UpperCAmelCase_ = 10**n
    UpperCAmelCase_ = 2_8433 * (pow(2 , 783_0457 , _UpperCamelCase )) + 1
    return str(number % modulus )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(F'''{solution(10) = }''')
712
'''simple docstring'''
from __future__ import annotations


def __lowerCamelCase ( _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ = position
    UpperCAmelCase_ = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    UpperCAmelCase_ = []

    for position in positions:
        UpperCAmelCase_ , UpperCAmelCase_ = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(_UpperCamelCase )

    return permissible_positions


def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )


def __lowerCamelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ):
    '''simple docstring'''
    if is_complete(_UpperCamelCase ):
        return True

    for position in get_valid_pos(_UpperCamelCase , len(_UpperCamelCase ) ):
        UpperCAmelCase_ , UpperCAmelCase_ = position

        if board[y][x] == 0:
            UpperCAmelCase_ = curr + 1
            if open_knight_tour_helper(_UpperCamelCase , _UpperCamelCase , curr + 1 ):
                return True
            UpperCAmelCase_ = 0

    return False


def __lowerCamelCase ( _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]

    for i in range(_UpperCamelCase ):
        for j in range(_UpperCamelCase ):
            UpperCAmelCase_ = 1
            if open_knight_tour_helper(_UpperCamelCase , (i, j) , 1 ):
                return board
            UpperCAmelCase_ = 0

    UpperCAmelCase_ = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(_UpperCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
43
0
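# The Project Euler 97 solution in the record above never materialises 2**7830457 in
# full; Python's three-argument pow performs modular exponentiation, keeping only the
# last n digits. The same idea in isolation:
n = 10
modulus = 10**n
number = 28433 * pow(2, 7830457, modulus) + 1
print(str(number % modulus))  # 8739992577, the known answer to the full problem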
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class lowerCamelCase ( metaclass=lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = ['''flax''', '''transformers''']

    def __init__( self : int , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : int ) ->Optional[Any]:
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : List[str] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : int ) ->List[str]:
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : Dict , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Dict ) ->Dict:
        requires_backends(cls , ['''flax''', '''transformers'''] )


class lowerCamelCase ( metaclass=lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = ['''flax''', '''transformers''']

    def __init__( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->List[str]:
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : int , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]:
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : List[Any] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : str ) ->Dict:
        requires_backends(cls , ['''flax''', '''transformers'''] )


class lowerCamelCase ( metaclass=lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = ['''flax''', '''transformers''']

    def __init__( self : List[str] , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Union[str, Any] ) ->Any:
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : List[Any] , *UpperCAmelCase__ : int , **UpperCAmelCase__ : Union[str, Any] ) ->Dict:
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[Any] ) ->Dict:
        requires_backends(cls , ['''flax''', '''transformers'''] )


class lowerCamelCase ( metaclass=lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = ['''flax''', '''transformers''']

    def __init__( self : List[str] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : str ) ->Tuple:
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Union[str, Any] ) ->Optional[Any]:
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def lowerCAmelCase__ ( cls : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Dict ) ->str:
        requires_backends(cls , ['''flax''', '''transformers'''] )
713
'''simple docstring'''
from __future__ import annotations

from typing import TypedDict


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = 42
    lowerCAmelCase__ = 42


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    if not isinstance(_UpperCamelCase , _UpperCamelCase ):
        raise TypeError('''The parameter s type must be str.''' )

    return [s[i:] + s[:i] for i in range(len(_UpperCamelCase ) )]


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    if not isinstance(_UpperCamelCase , _UpperCamelCase ):
        raise TypeError('''The parameter s type must be str.''' )
    if not s:
        raise ValueError('''The parameter s must not be empty.''' )

    UpperCAmelCase_ = all_rotations(_UpperCamelCase )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    UpperCAmelCase_ = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(_UpperCamelCase ),
    }
    return response


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int ):
    '''simple docstring'''
    if not isinstance(_UpperCamelCase , _UpperCamelCase ):
        raise TypeError('''The parameter bwt_string type must be str.''' )
    if not bwt_string:
        raise ValueError('''The parameter bwt_string must not be empty.''' )
    try:
        UpperCAmelCase_ = int(_UpperCamelCase )
    except ValueError:
        raise TypeError(
            '''The parameter idx_original_string type must be int or passive'''
            ''' of cast to int.''' )
    if idx_original_string < 0:
        raise ValueError('''The parameter idx_original_string must not be lower than 0.''' )
    if idx_original_string >= len(_UpperCamelCase ):
        raise ValueError(
            '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' )

    UpperCAmelCase_ = [''''''] * len(_UpperCamelCase )
    for _ in range(len(_UpperCamelCase ) ):
        for i in range(len(_UpperCamelCase ) ):
            UpperCAmelCase_ = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    lowercase__ : Optional[int] = "Provide a string that I will generate its BWT transform: "
    lowercase__ : List[Any] = input(entry_msg).strip()
    lowercase__ : Any = bwt_transform(s)
    print(
        F'''Burrows Wheeler transform for string \'{s}\' results '''
        F'''in \'{result['bwt_string']}\''''
    )
    lowercase__ : Dict = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
        F'''we get original string \'{original_string}\''''
    )
43
0
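# The Burrows-Wheeler transform in the record above sorts all rotations of the input
# and keeps the last column. Worked by hand for "banana", as an independent sketch
# (the obfuscated definitions above all share one name, so this does not call them):
s = "banana"
rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
bwt_string = "".join(word[-1] for word in rotations)
idx_original_string = rotations.index(s)
print(bwt_string, idx_original_string)  # nnbaaa 3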
'''simple docstring'''
import functools


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ):
    '''simple docstring'''
    UpperCAmelCase_ = len(_UpperCamelCase )
    UpperCAmelCase_ = len(_UpperCamelCase )

    @functools.cache
    def min_distance(_UpperCamelCase : int , _UpperCamelCase : int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_worda - indexa
        # if second word index is overflow - delete all from the first word
        if indexa >= len_worda:
            return len_worda - indexa
        UpperCAmelCase_ = int(worda[indexa] != worda[indexa] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , _UpperCamelCase ) ,
            1 + min_distance(_UpperCamelCase , indexa + 1 ) ,
            diff + min_distance(indexa + 1 , indexa + 1 ) ,
        )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
714
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


lowercase__ : Union[str, Any] = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : Union[str, Any] = ["MobileViTFeatureExtractor"]
    lowercase__ : List[Any] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : Dict = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : Optional[int] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    lowercase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
0
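# The memoized recursion in the record above computes the Levenshtein distance;
# "kitten" -> "sitting" needs three edits (k->s, e->i, insert g). A compact runnable
# check in the same style, with readable index names:
import functools

worda, wordb = "kitten", "sitting"


@functools.cache
def min_distance(indexa: int, indexb: int) -> int:
    if indexa >= len(worda):  # first word exhausted: insert the rest
        return len(wordb) - indexb
    if indexb >= len(wordb):  # second word exhausted: delete the rest
        return len(worda) - indexa
    diff = int(worda[indexa] != wordb[indexb])
    return min(
        1 + min_distance(indexa + 1, indexb),
        1 + min_distance(indexa, indexb + 1),
        diff + min_distance(indexa + 1, indexb + 1),
    )


print(min_distance(0, 0))  # 3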
'''simple docstring'''
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    lowercase__ : Dict = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor


class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Dict=18 , UpperCAmelCase__ : Tuple=30 , UpperCAmelCase__ : List[Any]=400 , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[str]=None , ) ->str:
        UpperCAmelCase_ = size if size is not None else {'''height''': 20, '''width''': 20}
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = image_size
        UpperCAmelCase_ = min_resolution
        UpperCAmelCase_ = max_resolution
        UpperCAmelCase_ = size
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = do_convert_rgb
        UpperCAmelCase_ = [512, 1024, 2048, 4096]
        UpperCAmelCase_ = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}

    def lowerCAmelCase__ ( self : Any ) ->Any:
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
        UpperCAmelCase_ = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        UpperCAmelCase_ = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ).convert('''RGB''' )
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 ,
    reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase__ = PixaStructImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
        UpperCAmelCase_ = PixaStructImageProcessingTester(self )

    @property
    def lowerCAmelCase__ ( self : Dict ) ->Dict:
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_convert_rgb''' ) )

    def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
        UpperCAmelCase_ = self.image_processor_tester.prepare_dummy_image()
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        UpperCAmelCase_ = 2048

        UpperCAmelCase_ = image_processor(UpperCAmelCase__ , return_tensors='''pt''' , max_patches=UpperCAmelCase__ )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )

    def lowerCAmelCase__ ( self : List[Any] ) ->Any:
        # Initialize image_processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )

        # Test not batched input
        UpperCAmelCase_ = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ = image_processor(
                UpperCAmelCase__ , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def lowerCAmelCase__ ( self : List[Any] ) ->Any:
        # Initialize image_processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )

        # Test not batched input
        UpperCAmelCase_ = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        UpperCAmelCase_ = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(UpperCAmelCase__ ):
                UpperCAmelCase_ = image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches

            UpperCAmelCase_ = '''Hello'''

            UpperCAmelCase_ = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ = image_processor(
                UpperCAmelCase__ , return_tensors='''pt''' , max_patches=UpperCAmelCase__ , header_text=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def lowerCAmelCase__ ( self : int ) ->Optional[int]:
        # Initialize image_processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )

        UpperCAmelCase_ = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ = image_processor(
                UpperCAmelCase__ , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
        # Initialize image_processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )

        # Test not batched input
        UpperCAmelCase_ = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ = image_processor(
                UpperCAmelCase__ , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 ,
    reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase__ = PixaStructImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self : List[Any] ) ->Dict:
        UpperCAmelCase_ = PixaStructImageProcessingTester(self , num_channels=4 )
        UpperCAmelCase_ = 3

    @property
    def lowerCAmelCase__ ( self : int ) ->Dict:
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self : List[str] ) ->Any:
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_convert_rgb''' ) )

    def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
        # Initialize image_processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )

        # Test not batched input
        UpperCAmelCase_ = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ = image_processor(
                UpperCAmelCase__ , return_tensors='''pt''' , max_patches=UpperCAmelCase__ ).flattened_patches
            self.assertEqual(
                encoded_images.shape ,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
715
'''simple docstring'''
lowercase__ : Union[str, Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

lowercase__ : str = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase__ : Any = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
43
0
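# The `expected_hidden_dim` recurring through the Pix2Struct tests above is the length
# of one flattened patch plus two slots for the row and column indices. With the
# tester defaults (16x16 patches, 3 channels):
patch_height = patch_width = 16
num_channels = 3
expected_hidden_dim = patch_height * patch_width * num_channels + 2  # +2 for row/col ids
print(expected_hidden_dim)  # 770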
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowercase__ : Union[str, Any] = logging.get_logger(__name__)

lowercase__ : Union[str, Any] = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''roformer'''

    def __init__( self : List[str] , UpperCAmelCase__ : Dict=5_0000 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[Any]=768 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3072 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=1536 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Optional[Any]=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[Any]=True , **UpperCAmelCase__ : Any , ) ->Tuple:
        super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )

        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = hidden_size if embedding_size is None else embedding_size
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = type_vocab_size
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = layer_norm_eps
        UpperCAmelCase_ = rotary_value
        UpperCAmelCase_ = use_cache


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    @property
    def lowerCAmelCase__ ( self : str ) ->Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            UpperCAmelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
        UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
716
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


lowercase__ : Optional[Any] = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : List[str] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
0
'''simple docstring'''
lowercase__ : Any = 0  # The first color of the flag.
lowercase__ : Union[str, Any] = 1  # The second color of the flag.
lowercase__ : List[str] = 2  # The third color of the flag.
lowercase__ : Optional[int] = (red, white, blue)


def __lowerCamelCase ( _UpperCamelCase : list ):
    '''simple docstring'''
    if not sequence:
        return []
    if len(_UpperCamelCase ) == 1:
        return list(_UpperCamelCase )
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = len(_UpperCamelCase ) - 1
    UpperCAmelCase_ = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            UpperCAmelCase_ , UpperCAmelCase_ = sequence[high], sequence[mid]
            high -= 1
        else:
            UpperCAmelCase_ = F"""The elements inside the sequence must contains only {colors} values"""
            raise ValueError(_UpperCamelCase )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lowercase__ : Tuple = input("Enter numbers separated by commas:\n").strip()
    lowercase__ : Any = [int(item.strip()) for item in user_input.split(",")]
    print(F'''{dutch_national_flag_sort(unsorted)}''')
717
'''simple docstring'''
from heapq import heappop, heappush

import numpy as np


def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : bool , ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ = grid.shape
    UpperCAmelCase_ = [-1, 1, 0, 0]
    UpperCAmelCase_ = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    UpperCAmelCase_ , UpperCAmelCase_ = [(0, source)], set()
    UpperCAmelCase_ = np.full((rows, cols) , np.inf )
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = np.empty((rows, cols) , dtype=_UpperCamelCase )
    UpperCAmelCase_ = None

    while queue:
        ((UpperCAmelCase_) , (UpperCAmelCase_)) = heappop(_UpperCamelCase )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            UpperCAmelCase_ = []
            while (x, y) != source:
                path.append((x, y) )
                UpperCAmelCase_ , UpperCAmelCase_ = predecessors[x, y]
            path.append(_UpperCamelCase )  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(_UpperCamelCase ) ):
            UpperCAmelCase_ , UpperCAmelCase_ = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                UpperCAmelCase_ = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(_UpperCamelCase , (dist + 1, (nx, ny)) )
                    UpperCAmelCase_ = dist + 1
                    UpperCAmelCase_ = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
43
0
'''simple docstring'''
from __future__ import annotations

from collections import namedtuple


def __lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ):
    '''simple docstring'''
    UpperCAmelCase_ = namedtuple('''result''' , '''name value''' )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError('''Only one argument must be 0''' )
    elif power < 0:
        raise ValueError(
            '''Power cannot be negative in any electrical/electronics system''' )
    elif voltage == 0:
        return result('''voltage''' , power / current )
    elif current == 0:
        return result('''current''' , power / voltage )
    elif power == 0:
        return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError('''Exactly one argument must be 0''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
718
'''simple docstring'''
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''
    lowerCAmelCase__ = XLMTokenizer
    lowerCAmelCase__ = False

    def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]:
        UpperCAmelCase_ = '''lower newer'''
        UpperCAmelCase_ = '''lower newer'''
        return input_text, output_text

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file )

        UpperCAmelCase_ = '''lower'''
        UpperCAmelCase_ = ['''low''', '''er</w>''']
        UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = tokens + ['''<unk>''']
        UpperCAmelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Any ) ->str:
        UpperCAmelCase_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )

        UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )

        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
43
0
'''simple docstring'''
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpta,
    recopy_gpta,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPTaLMHeadModel


def __lowerCamelCase ( _UpperCamelCase : int=32 , _UpperCamelCase : List[str]=10 , _UpperCamelCase : Optional[int]=100 , _UpperCamelCase : List[Any]=1026 , _UpperCamelCase : List[str]=True , _UpperCamelCase : Optional[int]="data/tokenized_stories_train_wikitext103.jbl" , _UpperCamelCase : List[str]="igf_context_pairs.jbl" , ):
    '''simple docstring'''
    set_seed(3 )
    # generate train_data and objective_set
    UpperCAmelCase_ , UpperCAmelCase_ = generate_datasets(
        _UpperCamelCase , _UpperCamelCase , number=_UpperCamelCase , min_len=1026 , trim=_UpperCamelCase )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    UpperCAmelCase_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    # load pretrained model
    UpperCAmelCase_ = load_gpta('''gpt2''' ).to(_UpperCamelCase )
    print('''computing perplexity on objective set''' )
    UpperCAmelCase_ = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).item()
    print('''perplexity on objective set:''' , _UpperCamelCase )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int]=15 , _UpperCamelCase : List[Any]=128 , _UpperCamelCase : Any=100 , _UpperCamelCase : Union[str, Any]="igf_model.pt" , ):
    '''simple docstring'''
    set_seed(42 )

    # Load pre-trained model
    UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained('''gpt2''' )

    # Initialize secondary learner to use embedding weights of model
    UpperCAmelCase_ = SecondaryLearner(_UpperCamelCase )

    # Train secondary learner
    UpperCAmelCase_ = train_secondary_learner(
        _UpperCamelCase , _UpperCamelCase , max_epochs=_UpperCamelCase , batch_size=_UpperCamelCase , eval_freq=100 , igf_model_path=_UpperCamelCase , )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any]=32 , _UpperCamelCase : Any=1000 , _UpperCamelCase : List[Any]=16 , _UpperCamelCase : List[str]=1.0 , _UpperCamelCase : List[Any]=recopy_gpta , _UpperCamelCase : List[Any]=None , _UpperCamelCase : int=10 , _UpperCamelCase : Any="gpt2_finetuned.pt" , ):
    '''simple docstring'''
    UpperCAmelCase_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    UpperCAmelCase_ = RandomSampler(_UpperCamelCase )
    UpperCAmelCase_ = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase )
    UpperCAmelCase_ = max_steps // (len(_UpperCamelCase )) + 1
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=_UpperCamelCase )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = recopy_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(_UpperCamelCase )
        secondary_learner.eval()
    UpperCAmelCase_ = []
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = []
    UpperCAmelCase_ = []

    # Compute the performance of the transformer model at the beginning
    UpperCAmelCase_ = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    test_perps.append(_UpperCamelCase )
    print('''Test perplexity, step''' , _UpperCamelCase , ''':''' , _UpperCamelCase )
    for epoch in range(int(_UpperCamelCase ) ):
        for step, example in enumerate(_UpperCamelCase ):
            torch.cuda.empty_cache()
            UpperCAmelCase_ = random.randint(0 , example.size(2 ) - context_len - 1 )
            UpperCAmelCase_ = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            UpperCAmelCase_ = model(_UpperCamelCase , labels=_UpperCamelCase )
            UpperCAmelCase_ = True

            if secondary_learner is not None:
                UpperCAmelCase_ = secondary_learner.forward(
                    torch.tensor(_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(_UpperCamelCase ) )

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    UpperCAmelCase_ = -1
                if predicted_q < threshold:
                    UpperCAmelCase_ = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                UpperCAmelCase_ = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                UpperCAmelCase_ = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    UpperCAmelCase_ = compute_perplexity(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                    test_perps.append(_UpperCamelCase )
                    print('''Test perplexity, step''' , _UpperCamelCase , ''':''' , _UpperCamelCase )

                # Break out of the loop after 60 batches
                if max_steps > 0 and global_step > 60:
                    break
            if max_steps > 0 and global_step > 60:
                break

    # save finetuned transformer model
    torch.save(model.state_dict() , _UpperCamelCase )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )

    # Required parameters
    parser.add_argument(
        '''--data_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The input data dir. Should contain data files for WikiText.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--data_file''' , type=_UpperCamelCase , default=_UpperCamelCase , help=(
            '''A jbl file containing tokenized data which can be split as objective dataset, '''
            '''train_dataset and test_dataset.'''
        ) , )
    parser.add_argument(
        '''--igf_data_file''' , type=_UpperCamelCase , default=_UpperCamelCase , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
    parser.add_argument(
        '''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the final fine-tuned model is stored.''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument('''--seed''' , type=_UpperCamelCase , default=_UpperCamelCase , help='''A seed for reproducible training.''' )
    parser.add_argument(
        '''--context_len''' , default=32 , type=_UpperCamelCase , help=(
            '''The maximum total input sequence length after tokenization. Sequences longer '''
            '''than this will be truncated, sequences shorter will be padded.'''
        ) , )
    parser.add_argument(
        '''--size_objective_set''' , default=100 , type=_UpperCamelCase , help='''number of articles that are long enough to be used as our objective set''' , )
    parser.add_argument(
        '''--eval_freq''' , default=100 , type=_UpperCamelCase , help='''secondary model evaluation is triggered at eval_freq''' )
    parser.add_argument('''--max_steps''' , default=1000 , type=_UpperCamelCase , help='''To calculate training epochs''' )
    parser.add_argument(
        '''--secondary_learner_batch_size''' , default=128 , type=_UpperCamelCase , help='''batch size of training data for secondary learner''' , )
    parser.add_argument(
        '''--batch_size''' , default=16 , type=_UpperCamelCase , help='''batch size of training data of language model(gpt2) ''' )
    parser.add_argument(
        '''--eval_interval''' , default=10 , type=_UpperCamelCase , help=(
            '''decay the selectivity of our secondary learner filter from'''
            '''1 standard deviation above average to 1 below average after 10 batches'''
        ) , )
    parser.add_argument(
        '''--number''' , default=100 , type=_UpperCamelCase , help='''The number of examples split to be used as objective_set/test_data''' )
    parser.add_argument(
        '''--min_len''' , default=1026 , type=_UpperCamelCase , help='''The minimum length of the article to be used as objective set''' )
    parser.add_argument(
        '''--secondary_learner_max_epochs''' , default=15 , type=_UpperCamelCase , help='''number of epochs to train secondary learner''' )
    parser.add_argument('''--trim''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''truncate the example if it exceeds context length''' )
    parser.add_argument(
        '''--threshold''' , default=1.0 , type=_UpperCamelCase , help=(
            '''The threshold value used by secondary learner to filter the train_data and allow only'''
            ''' informative data as input to the model'''
        ) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=_UpperCamelCase , help='''finetuned_model_name''' )
    parser.add_argument(
        '''--recopy_model''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=_UpperCamelCase , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )

    # Load train data for secondary learner
    UpperCAmelCase_ = joblib.load('''data/IGF_values.jbl''' )

    # Train secondary learner
    UpperCAmelCase_ = training_secondary_learner(
        _UpperCamelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )

    # load pretrained gpt2 model
    UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained('''gpt2''' )
    set_seed(42 )

    # Generate train and test data to train and evaluate gpt2 model
    UpperCAmelCase_ , UpperCAmelCase_ = generate_datasets(
        context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1026 , trim=_UpperCamelCase )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=_UpperCamelCase , secondary_learner=_UpperCamelCase , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )


if __name__ == "__main__":
    main()
719
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    for param in module.parameters():
        UpperCAmelCase_ = False


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''

    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        UpperCAmelCase_ = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device


def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ = plt.imshow(_UpperCamelCase )
    fig.axes.get_xaxis().set_visible(_UpperCamelCase )
    fig.axes.get_yaxis().set_visible(_UpperCamelCase )
    plt.show()


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = datetime.now()
    UpperCAmelCase_ = current_time.strftime('''%H:%M:%S''' )
    return timestamp
43
0
'''simple docstring'''
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    def __init__( self : int , UpperCAmelCase__ : pyspark.sql.DataFrame , UpperCAmelCase__ : Optional[NamedSplit] = None , UpperCAmelCase__ : Optional[Features] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "arrow" , **UpperCAmelCase__ : List[Any] , ) ->Optional[int]:
        super().__init__(
            split=UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , **UpperCAmelCase__ , )
        UpperCAmelCase_ = load_from_cache_file
        UpperCAmelCase_ = file_format
        UpperCAmelCase_ = Spark(
            df=UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , working_dir=UpperCAmelCase__ , **UpperCAmelCase__ , )

    def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        UpperCAmelCase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=UpperCAmelCase__ , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
720
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase__ ( self : str ) ->List[str]:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        sd_pipe.set_scheduler('''sample_euler''' )

        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        UpperCAmelCase_ = output.images

        UpperCAmelCase_ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCAmelCase__ ( self : List[str] ) ->int:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        sd_pipe.set_scheduler('''sample_euler''' )

        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        UpperCAmelCase_ = output.images

        UpperCAmelCase_ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )

        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCAmelCase__ , )

        UpperCAmelCase_ = output.images

        UpperCAmelCase_ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
43
0
'''simple docstring'''
from random import randint, random


def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : int = 5 , ):
    '''simple docstring'''
    UpperCAmelCase_ = [[-1] * number_of_cells]  # Create a highway without any car
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = max(_UpperCamelCase , 0 )
    while i < number_of_cells:
        UpperCAmelCase_ = (
            randint(0 , _UpperCamelCase ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def __lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = highway_now[car_index + 1 :]
    for cell in range(len(_UpperCamelCase ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(_UpperCamelCase , -1 )


def __lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : float , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = len(_UpperCamelCase )
    # Beforce calculations, the highway is empty
    UpperCAmelCase_ = [-1] * number_of_cells

    for car_index in range(_UpperCamelCase ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            UpperCAmelCase_ = min(highway_now[car_index] + 1 , _UpperCamelCase )
            # Number of empty cell before the next car
            UpperCAmelCase_ = get_distance(_UpperCamelCase , _UpperCamelCase ) - 1
            # We can't have the car causing an accident
            UpperCAmelCase_ = min(next_highway[car_index] , _UpperCamelCase )
            if random() < probability:
                # Randomly, a driver will slow down
                UpperCAmelCase_ = max(next_highway[car_index] - 1 , 0 )
    return next_highway


def __lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = len(highway[0] )

    for i in range(_UpperCamelCase ):
        UpperCAmelCase_ = update(highway[i] , _UpperCamelCase , _UpperCamelCase )
        UpperCAmelCase_ = [-1] * number_of_cells

        for car_index in range(_UpperCamelCase ):
            UpperCAmelCase_ = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                UpperCAmelCase_ = (car_index + speed) % number_of_cells
                # Commit the change of position
                UpperCAmelCase_ = speed
        highway.append(_UpperCamelCase )

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
721
'''simple docstring'''
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''
    lowerCAmelCase__ = MvpTokenizer
    lowerCAmelCase__ = MvpTokenizerFast
    lowerCAmelCase__ = True
    lowerCAmelCase__ = filter_roberta_detectors

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        super().setUp()
        UpperCAmelCase_ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        UpperCAmelCase_ = {'''unk_token''': '''<unk>'''}

        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase__ ( self : Tuple , **UpperCAmelCase__ : List[str] ) ->Dict:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : int ) ->Tuple:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]:
        return "lower newer", "lower newer"

    @cached_property
    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
        return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )

    @cached_property
    def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
        return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )

    @require_torch
    def lowerCAmelCase__ ( self : Any ) ->Dict:
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            UpperCAmelCase_ = batch.input_ids.tolist()[0]
            self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
            # Test that special tokens are reset

    @require_torch
    def lowerCAmelCase__ ( self : str ) ->int:
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' )
            # check if input_ids are returned and no labels
            self.assertIn('''input_ids''' , UpperCAmelCase__ )
            self.assertIn('''attention_mask''' , UpperCAmelCase__ )
            self.assertNotIn('''labels''' , UpperCAmelCase__ )
            self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ )

    @require_torch
    def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
        UpperCAmelCase_ = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )

    @require_torch
    def lowerCAmelCase__ ( self : List[str] ) ->int:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
            self.assertEqual(batch.input_ids.shape , (2, 1024) )

    @require_torch
    def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
        UpperCAmelCase_ = ['''A long paragraph for summarization.''']
        UpperCAmelCase_ = [
            '''Summary of the text.''',
        ]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , text_target=UpperCAmelCase__ , return_tensors='''pt''' )
            UpperCAmelCase_ = inputs['''input_ids''']
            UpperCAmelCase_ = inputs['''labels''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
        pass

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.'''
                UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
                UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )

                UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )

                self.assertSequenceEqual(
                    UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
43
0
'''simple docstring'''
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : float ) ->float:
        return 0.0


def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    UpperCAmelCase_ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def __lowerCamelCase ( _UpperCamelCase : FilterType , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = 512
    UpperCAmelCase_ = [1] + [0] * (size - 1)
    UpperCAmelCase_ = [filter_type.process(_UpperCamelCase ) for item in inputs]

    UpperCAmelCase_ = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    UpperCAmelCase_ = np.abs(np.fft.fft(_UpperCamelCase ) )
    UpperCAmelCase_ = 20 * np.logaa(_UpperCamelCase )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    # Display within reasonable bounds
    UpperCAmelCase_ = get_bounds(_UpperCamelCase , _UpperCamelCase )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('''Gain (dB)''' )

    plt.plot(_UpperCamelCase )
    plt.show()


def __lowerCamelCase ( _UpperCamelCase : FilterType , _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = 512
    UpperCAmelCase_ = [1] + [0] * (size - 1)
    UpperCAmelCase_ = [filter_type.process(_UpperCamelCase ) for item in inputs]

    UpperCAmelCase_ = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    UpperCAmelCase_ = np.angle(np.fft.fft(_UpperCamelCase ) )

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('''Frequency (Hz)''' )
    plt.xscale('''log''' )

    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('''Phase shift (Radians)''' )
    plt.plot(np.unwrap(_UpperCamelCase , -2 * pi ) )
    plt.show()
700
'''simple docstring'''
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : list[str] ):
    '''simple docstring'''
    UpperCAmelCase_ = ''''''
    for word_or_phrase in separated:
        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(_UpperCamelCase )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
43
0
'''simple docstring'''
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class lowerCamelCase :
    '''simple docstring'''
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = None
    lowerCAmelCase__ = None


def __lowerCamelCase ( _UpperCamelCase : TreeNode | None ):
    '''simple docstring'''

    def is_valid_tree(_UpperCamelCase : TreeNode | None ) -> bool:
        if node is None:
            return True

        if not isinstance(_UpperCamelCase , _UpperCamelCase ):
            return False

        try:
            float(node.data )
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left ) and is_valid_tree(node.right )

    if not is_valid_tree(_UpperCamelCase ):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''' )

    def is_binary_search_tree_recursive_check(
        _UpperCamelCase : TreeNode | None , _UpperCamelCase : float , _UpperCamelCase : float ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , _UpperCamelCase , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , _UpperCamelCase )
        )

    return is_binary_search_tree_recursive_check(_UpperCamelCase , -float('''inf''' ) , float('''inf''' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


lowercase__ : Optional[int] = logging.get_logger(__name__)


def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, Iterable[int]] , _UpperCamelCase : bool , _UpperCamelCase : int ):
    '''simple docstring'''

    def constraint_to_multiple_of(_UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : str=None ):
        UpperCAmelCase_ = round(val / multiple ) * multiple

        if max_val is not None and x > max_val:
            UpperCAmelCase_ = math.floor(val / multiple ) * multiple

        if x < min_val:
            UpperCAmelCase_ = math.ceil(val / multiple ) * multiple

        return x

    UpperCAmelCase_ = (output_size, output_size) if isinstance(_UpperCamelCase , _UpperCamelCase ) else output_size

    UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(_UpperCamelCase )
    UpperCAmelCase_ , UpperCAmelCase_ = output_size

    # determine new height and width
    UpperCAmelCase_ = output_height / input_height
    UpperCAmelCase_ = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            UpperCAmelCase_ = scale_width
        else:
            # fit height
            UpperCAmelCase_ = scale_height

    UpperCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=_UpperCamelCase )
    UpperCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=_UpperCamelCase )

    return (new_height, new_width)


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = ['''pixel_values''']

    def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : str , ) ->None:
        super().__init__(**UpperCAmelCase__ )
        UpperCAmelCase_ = size if size is not None else {'''height''': 384, '''width''': 384}
        UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
        UpperCAmelCase_ = do_resize
        UpperCAmelCase_ = size
        UpperCAmelCase_ = keep_aspect_ratio
        UpperCAmelCase_ = ensure_multiple_of
        UpperCAmelCase_ = resample
        UpperCAmelCase_ = do_rescale
        UpperCAmelCase_ = rescale_factor
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray:
        UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        UpperCAmelCase_ = get_resize_output_image_size(
            UpperCAmelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , )
        return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Any:
        return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray:
        return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->PIL.Image.Image:
        UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase_ = size if size is not None else self.size
        UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
        UpperCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        UpperCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        UpperCAmelCase_ = resample if resample is not None else self.resample
        UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase_ = image_std if image_std is not None else self.image_std

        UpperCAmelCase_ = make_list_of_images(UpperCAmelCase__ )

        if not valid_images(UpperCAmelCase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )

        # All transformations expect numpy arrays.
        UpperCAmelCase_ = [to_numpy_array(UpperCAmelCase__ ) for image in images]

        if do_resize:
            UpperCAmelCase_ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]

        if do_rescale:
            UpperCAmelCase_ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]

        if do_normalize:
            UpperCAmelCase_ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]

        UpperCAmelCase_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]

        UpperCAmelCase_ = {'''pixel_values''': images}
        return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) ->Optional[Any]:
        UpperCAmelCase_ = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )

            if is_torch_tensor(UpperCAmelCase__ ):
                UpperCAmelCase_ = target_sizes.numpy()

            UpperCAmelCase_ = []

            for idx in range(len(UpperCAmelCase__ ) ):
                UpperCAmelCase_ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__ )
                UpperCAmelCase_ = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(UpperCAmelCase__ )
        else:
            UpperCAmelCase_ = logits.argmax(dim=1 )
            UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
43
0
'''simple docstring'''
import math


def __lowerCamelCase ( _UpperCamelCase : int ):
    '''simple docstring'''
    UpperCAmelCase_ = []
    UpperCAmelCase_ = 2
    UpperCAmelCase_ = int(math.sqrt(_UpperCamelCase ) )  # Size of every segment
    UpperCAmelCase_ = [True] * (end + 1)
    UpperCAmelCase_ = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(_UpperCamelCase )
            for i in range(start * start , end + 1 , _UpperCamelCase ):
                UpperCAmelCase_ = False
        start += 1
    prime += in_prime

    UpperCAmelCase_ = end + 1
    UpperCAmelCase_ = min(2 * end , _UpperCamelCase )

    while low <= n:
        UpperCAmelCase_ = [True] * (high - low + 1)
        for each in in_prime:
            UpperCAmelCase_ = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(_UpperCamelCase , high + 1 , _UpperCamelCase ):
                UpperCAmelCase_ = False
        for j in range(len(_UpperCamelCase ) ):
            if temp[j] is True:
                prime.append(j + low )
        UpperCAmelCase_ = high + 1
        UpperCAmelCase_ = min(high + end , _UpperCamelCase )

    return prime


print(sieve(10**6))
702
'''simple docstring'''
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase__ : int = logging.get_logger(__name__)

lowercase__ : List[str] = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = '''time_series_transformer'''
    lowerCAmelCase__ = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]:
        # time series specific configuration
        UpperCAmelCase_ = prediction_length
        UpperCAmelCase_ = context_length or prediction_length
        UpperCAmelCase_ = distribution_output
        UpperCAmelCase_ = loss
        UpperCAmelCase_ = input_size
        UpperCAmelCase_ = num_time_features
        UpperCAmelCase_ = lags_sequence
        UpperCAmelCase_ = scaling
        UpperCAmelCase_ = num_dynamic_real_features
        UpperCAmelCase_ = num_static_real_features
        UpperCAmelCase_ = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(UpperCAmelCase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            UpperCAmelCase_ = cardinality
        else:
            UpperCAmelCase_ = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(UpperCAmelCase__ ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            UpperCAmelCase_ = embedding_dimension
        else:
            UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        UpperCAmelCase_ = num_parallel_samples

        # Transformer architecture configuration
        UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features
        UpperCAmelCase_ = d_model
        UpperCAmelCase_ = encoder_attention_heads
        UpperCAmelCase_ = decoder_attention_heads
        UpperCAmelCase_ = encoder_ffn_dim
        UpperCAmelCase_ = decoder_ffn_dim
        UpperCAmelCase_ = encoder_layers
        UpperCAmelCase_ = decoder_layers

        UpperCAmelCase_ = dropout
        UpperCAmelCase_ = attention_dropout
        UpperCAmelCase_ = activation_dropout
        UpperCAmelCase_ = encoder_layerdrop
        UpperCAmelCase_ = decoder_layerdrop

        UpperCAmelCase_ = activation_function
        UpperCAmelCase_ = init_std

        UpperCAmelCase_ = use_cache

        super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )

    @property
    def lowerCAmelCase__ ( self : List[str] ) ->int:
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
43
0
'''simple docstring'''
import os


def __lowerCamelCase ( ):
    '''simple docstring'''
    with open(os.path.dirname(_UpperCamelCase ) + '''/grid.txt''' ) as f:
        UpperCAmelCase_ = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(_UpperCamelCase ) for x in f.readline().split()] )

    UpperCAmelCase_ = 0

    # right
    for i in range(20 ):
        for j in range(17 ):
            UpperCAmelCase_ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                UpperCAmelCase_ = temp

    # down
    for i in range(17 ):
        for j in range(20 ):
            UpperCAmelCase_ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                UpperCAmelCase_ = temp

    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            UpperCAmelCase_ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                UpperCAmelCase_ = temp

    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            UpperCAmelCase_ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                UpperCAmelCase_ = temp
    return maximum


if __name__ == "__main__":
    print(solution())
703
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig


lowercase__ : Dict = logging.get_logger(__name__)

lowercase__ : List[Any] = "T5Config"


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''
    lowerCAmelCase__ = '''mt5'''
    lowerCAmelCase__ = MTaConfig
43
0
'''simple docstring'''
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''
    lowerCAmelCase__ = BioGptTokenizer
    lowerCAmelCase__ = False

    def lowerCAmelCase__ ( self : Any ) ->List[Any]:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Optional[Any] ) ->Tuple:
        UpperCAmelCase_ = '''lower newer'''
        UpperCAmelCase_ = '''lower newer'''
        return input_text, output_text

    def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
        UpperCAmelCase_ = BioGptTokenizer(self.vocab_file , self.merges_file )

        UpperCAmelCase_ = '''lower'''
        UpperCAmelCase_ = ['''low''', '''er</w>''']
        UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = tokens + ['''<unk>''']
        UpperCAmelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
        UpperCAmelCase_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )

        UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )

        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )

        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
704
'''simple docstring'''
import comet  # From: unbabel-comet
import torch

import datasets


lowercase__ : str = datasets.logging.get_logger(__name__)

lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : List[Any] ) ->Any:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] , )

    def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Union[str, Any] ) ->Any:
        if self.config_name == "default":
            UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=False ) ->Optional[Any]:
        if gpus is None:
            UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
        UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        UpperCAmelCase_ = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for t in zip(*data.values() )]
        UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(UpperCAmelCase__ , gpus=UpperCAmelCase__ , progress_bar=UpperCAmelCase__ )
        return {"mean_score": mean_score, "scores": scores}
43
0
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Union[str, Any]=400 , UpperCAmelCase__ : List[Any]=3 , ) ->Dict:
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = do_resize
        UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
        UpperCAmelCase_ = size_divisor
        UpperCAmelCase_ = do_rescale
        UpperCAmelCase_ = rescale_factor
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = do_center_crop
        UpperCAmelCase_ = image_mean
        UpperCAmelCase_ = image_std
        UpperCAmelCase_ = do_pad
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = min_resolution
        UpperCAmelCase_ = max_resolution

    def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=False ) ->Any:
        if not batched:
            UpperCAmelCase_ = self.size['''shortest_edge''']
            UpperCAmelCase_ = image_inputs[0]
            if isinstance(UpperCAmelCase__ , Image.Image ):
                UpperCAmelCase_ , UpperCAmelCase_ = image.size
            else:
                UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
            UpperCAmelCase_ = size / min(UpperCAmelCase__ , UpperCAmelCase__ )
            if h < w:
                UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
            else:
                UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size

            UpperCAmelCase_ = int((1333 / 800) * size )
            if max(UpperCAmelCase__ , UpperCAmelCase__ ) > max_size:
                UpperCAmelCase_ = max_size / max(UpperCAmelCase__ , UpperCAmelCase__ )
                UpperCAmelCase_ = newh * scale
                UpperCAmelCase_ = neww * scale

            UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 )
            UpperCAmelCase_ , UpperCAmelCase_ = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            UpperCAmelCase_ = []
            for image in image_inputs:
                UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
            UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase__ = BridgeTowerImageProcessor if is_vision_available() else None

    def lowerCAmelCase__ ( self : Optional[int] ) ->str:
        UpperCAmelCase_ = BridgeTowerImageProcessingTester(self )

    @property
    def lowerCAmelCase__ ( self : List[str] ) ->Dict:
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase__ ( self : Optional[int] ) ->int:
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase__ , '''size_divisor''' ) )

    def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
        pass

    def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
        # Initialize image processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , Image.Image )

        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
        # Initialize image processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , np.ndarray )

        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase__ ( self : int ) ->List[str]:
        # Initialize image processor
        UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )

        # Test not batched input
        UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
705
43
0
'''simple docstring'''
lowercase__ : List[Any] = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
lowercase__ : List[str] = {value: key for key, value in encode_dict.items()}


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    UpperCAmelCase_ = ''''''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
    return encoded


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    if set(_UpperCamelCase ) - {"A", "B", " "} != set():
        raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
    UpperCAmelCase_ = ''''''
    for word in coded.split():
        while len(_UpperCamelCase ) != 0:
            decoded += decode_dict[word[:5]]
            UpperCAmelCase_ = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
706
'''simple docstring'''
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSpeechSeqaSeq,
        TFAutoModelForVisionaSeq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Dict ) ->List[str]:
        UpperCAmelCase_ = tf.convert_to_tensor(
            [
                [
                    8.222_0991,  # 3rd highest value; idx. 0
                    -0.562_0044,
                    5.2322_9752,
                    4.038_6393,
                    -6.879_8378,
                    -0.5478_5802,
                    -3.201_2153,
                    2.9277_7176,
                    1.8817_1953,
                    7.3534_1276,  # 5th highest value; idx. 9
                    8.4320_7833,  # 2nd highest value; idx. 10
                    -9.8571_1836,
                    -5.9620_9236,
                    -1.1303_9161,
                    -7.111_5294,
                    -0.836_9633,
                    -5.318_6408,
                    7.0642_7407,
                    0.8136_9344,
                    -0.8202_3817,
                    -5.917_9796,
                    0.5881_3443,
                    -6.9977_8438,
                    4.7155_1189,
                    -0.1877_1637,
                    7.4402_0759,  # 4th highest value; idx. 25
                    9.3845_0987,  # 1st highest value; idx. 26
                    2.1266_2941,
                    -9.3256_2038,
                    2.3565_2522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.5842_5518,
                    4.5313_9238,
                    -5.5751_0464,
                    -6.2803_0699,
                    -7.1952_9503,
                    -4.0212_2551,
                    1.3933_7037,
                    -6.0670_7057,
                    1.5948_0517,
                    -9.64_3119,
                    0.0390_7799,
                    0.6723_1762,
                    -8.8820_6726,
                    6.2711_5922,  # 4th highest value; idx. 13
                    2.2852_0723,
                    4.8276_7506,
                    4.3042_1368,
                    8.827_5313,  # 2nd highest value; idx. 17
                    5.4402_9958,  # 5th highest value; idx. 18
                    -4.473_5794,
                    7.3857_9536,  # 3rd highest value; idx. 20
                    -2.9105_1663,
                    2.6194_6077,
                    -2.567_4762,
                    -9.4895_9302,
                    -4.0292_2645,
                    -1.3541_6918,
                    9.6770_2323,  # 1st highest value; idx. 27
                    -5.8947_8553,
                    1.8537_0467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ] ,
            dtype=tf.floataa , )

        UpperCAmelCase_ = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] ,
            dtype=tf.intaa , )  # expected non filtered idx as noted above

        UpperCAmelCase_ = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] ,
            dtype=tf.floataa , )  # expected non filtered values as noted above

        UpperCAmelCase_ = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )

        UpperCAmelCase_ = output[output != -float('''inf''' )]
        UpperCAmelCase_ = tf.cast(
            tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) ,
            dtype=tf.intaa , )

        tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 )
        tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )


@require_tf
class lowerCamelCase ( unittest.TestCase , lowerCamelCase ):
    '''simple docstring'''

    if is_tf_available():
        lowerCAmelCase__ = {
            '''AutoModelForCausalLM''': TFAutoModelForCausalLM,
            '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
            '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
            '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
            '''LogitsProcessorList''': TFLogitsProcessorList,
            '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
            '''create_tensor_fn''': tf.convert_to_tensor,
            '''floats_tensor''': floats_tensor,
            '''return_tensors''': '''tf''',
        }

    @slow
    def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
        # TF-only test: tf.saved_model export
        UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = 2

        class lowerCamelCase ( tf.Module ):
            '''simple docstring'''

            def __init__( self : List[str] , UpperCAmelCase__ : List[str] ) ->Dict:
                super(UpperCAmelCase__ , self ).__init__()
                UpperCAmelCase_ = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
                ) ,
                jit_compile=UpperCAmelCase__ , )
            def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->int:
                UpperCAmelCase_ = self.model.generate(
                    input_ids=UpperCAmelCase__ ,
                    attention_mask=UpperCAmelCase__ ,
                    max_new_tokens=UpperCAmelCase__ ,
                    return_dict_in_generate=UpperCAmelCase__ , )
                return {"sequences": outputs["sequences"]}

        UpperCAmelCase_ = [[2, 0], [102, 103]]
        UpperCAmelCase_ = [[1, 0], [1, 1]]
        UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
            UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
            for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ):
                UpperCAmelCase_ = {
                    '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
                    '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
                UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
                tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
        # TF-only test: tf.saved_model export
        UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = 1
        UpperCAmelCase_ = 2

        class lowerCamelCase ( tf.Module ):
            '''simple docstring'''

            def __init__( self : int , UpperCAmelCase__ : int ) ->List[str]:
                super(UpperCAmelCase__ , self ).__init__()
                UpperCAmelCase_ = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
                ) ,
                jit_compile=UpperCAmelCase__ , )
            def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->int:
                UpperCAmelCase_ = self.model.generate(
                    input_ids=UpperCAmelCase__ ,
                    attention_mask=UpperCAmelCase__ ,
                    max_new_tokens=UpperCAmelCase__ ,
                    return_dict_in_generate=UpperCAmelCase__ , )
                return {"sequences": outputs["sequences"]}

        UpperCAmelCase_ = [[2], [102, 103]]
        UpperCAmelCase_ = [[1], [1, 1]]
        UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
            UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
            for input_row in range(len(UpperCAmelCase__ ) ):
                UpperCAmelCase_ = {
                    '''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
                    '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
                }
                UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
                UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
                tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    @require_tensorflow_text
    def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase__ )

            class lowerCamelCase ( tf.keras.layers.Layer ):
                '''simple docstring'''

                def __init__( self : List[str] ) ->Any:
                    super().__init__()
                    UpperCAmelCase_ = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , '''spiece.model''' ) , '''rb''' ).read() )
                    UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )

                def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]:
                    UpperCAmelCase_ = self.tokenizer.tokenize(UpperCAmelCase__ )
                    UpperCAmelCase_ , UpperCAmelCase_ = text.pad_model_inputs(
                        UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    UpperCAmelCase_ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
                    return self.tokenizer.detokenize(UpperCAmelCase__ )

            UpperCAmelCase_ = CompleteSentenceTransformer()
            UpperCAmelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
            UpperCAmelCase_ = complete_model(UpperCAmelCase__ )
            UpperCAmelCase_ = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
            keras_model.save(UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
        # Has PT equivalent: this test relies on random sampling
        UpperCAmelCase_ = {
            '''do_sample''': True,
            '''num_beams''': 1,
            '''top_p''': 0.7,
            '''top_k''': 10,
            '''temperature''': 0.7,
        }
        UpperCAmelCase_ = 14
        UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = '''Hello, my dog is cute and'''
        UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' )
        UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        UpperCAmelCase_ = 638

        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

        UpperCAmelCase_ = [638, 198]
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

    def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
        # Has PT equivalent: ample use of framework-specific code
        UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        UpperCAmelCase_ = '''Hugging Face is a technology company based in New York and Paris.'''
        UpperCAmelCase_ = bart_tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ).input_ids
        UpperCAmelCase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()

        class lowerCamelCase ( lowerCamelCase ):
            '''simple docstring'''

            def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) ->List[str]:
                return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )

        UpperCAmelCase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ , foo='''bar''' ).numpy()
        self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )

        class lowerCamelCase ( bart_model.model.encoder.__class__ ):
            '''simple docstring'''

            def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->Any:
                return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )

        UpperCAmelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared )
        UpperCAmelCase_ = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()
        with self.assertRaises(UpperCAmelCase__ ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(UpperCAmelCase__ , foo='''bar''' )
43
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase__ : Dict = logging.get_logger(__name__)

lowercase__ : Tuple = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''mra'''

    def __init__( self : List[str] , UpperCAmelCase__ : Optional[int]=5_0265 , UpperCAmelCase__ : Tuple=768 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=3072 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : str=1e-5 , UpperCAmelCase__ : Any="absolute" , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Tuple="full" , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : List[str]=0 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : List[str]=0 , UpperCAmelCase__ : Optional[int]=2 , **UpperCAmelCase__ : List[str] , ) ->Optional[Any]:
        super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )

        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = type_vocab_size
        UpperCAmelCase_ = layer_norm_eps
        UpperCAmelCase_ = position_embedding_type
        UpperCAmelCase_ = block_per_row
        UpperCAmelCase_ = approx_mode
        UpperCAmelCase_ = initial_prior_first_n_blocks
        UpperCAmelCase_ = initial_prior_diagonal_n_blocks
707
'''simple docstring'''
from collections.abc import Callable


def __lowerCamelCase ( _UpperCamelCase : Callable[[float], float] , _UpperCamelCase : float , _UpperCamelCase : float ):
    '''simple docstring'''
    UpperCAmelCase_ = a
    UpperCAmelCase_ = b
    if function(_UpperCamelCase ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(_UpperCamelCase ) == 0:
        return b
    elif (
        function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''' )
    else:
        UpperCAmelCase_ = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(_UpperCamelCase ) == 0:
                return mid
            elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
                UpperCAmelCase_ = mid
            else:
                UpperCAmelCase_ = mid
            UpperCAmelCase_ = start + (end - start) / 2.0
        return mid


def __lowerCamelCase ( _UpperCamelCase : float ):
    '''simple docstring'''
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
43
0
'''simple docstring'''
from __future__ import annotations

import copy
import tempfile
import unittest

from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tensorflow_probability,
    require_tf,
    slow,
)

from ..bert.test_modeling_bert import BertModelTester


if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPTaLMHeadModel,
        TFRobertaForMaskedLM,
        TFTaForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = '''new-model'''


if is_tf_available():

    class lowerCamelCase ( lowerCamelCase ):
        '''simple docstring'''

        lowerCAmelCase__ = NewModelConfig


@require_tf
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def lowerCAmelCase__ ( self : int ) ->int:
        UpperCAmelCase_ = '''bert-base-cased'''
        UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = TFAutoModel.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : int ) ->Any:
        UpperCAmelCase_ = '''bert-base-cased'''
        UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase__ )
        self.assertIsNotNone(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Tuple ) ->str:
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ )
            UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Tuple ) ->str:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ )
            UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Tuple ) ->int:
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ )
            UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : List[str] ) ->Any:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            UpperCAmelCase_ = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : List[str] ) ->Any:
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            UpperCAmelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    @require_tensorflow_probability
    def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            UpperCAmelCase_ = AutoConfig.from_pretrained(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

            UpperCAmelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase__ )
            UpperCAmelCase_ , UpperCAmelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
                UpperCAmelCase__ , output_loading_info=UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : int ) ->Optional[int]:
        UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 1_4410 )

    def lowerCAmelCase__ ( self : int ) ->int:
        UpperCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        self.assertEqual(model.num_parameters() , 1_4410 )
        self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase__ ) , 1_4410 )

    def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        UpperCAmelCase_ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = copy.deepcopy(model.config )
        UpperCAmelCase_ = ['''FunnelBaseModel''']
        UpperCAmelCase_ = TFAutoModel.from_config(UpperCAmelCase__ )
        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(UpperCAmelCase__ )
            UpperCAmelCase_ = TFAutoModel.from_pretrained(UpperCAmelCase__ )
            self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : str ) ->Optional[int]:
        try:
            AutoConfig.register('''new-model''' , UpperCAmelCase__ )

            UpperCAmelCase_ = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(UpperCAmelCase__ ):
                        auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(UpperCAmelCase__ ):
                        auto_class.register(UpperCAmelCase__ , UpperCAmelCase__ )

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    UpperCAmelCase_ = BertModelTester(self ).get_config()
                    UpperCAmelCase_ = NewModelConfig(**tiny_config.to_dict() )
                    UpperCAmelCase_ = auto_class.from_config(UpperCAmelCase__ )
                    self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(UpperCAmelCase__ )
                        UpperCAmelCase_ = auto_class.from_pretrained(UpperCAmelCase__ )
                        self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
        with self.assertRaisesRegex(
            UpperCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
            UpperCAmelCase_ = TFAutoModel.from_pretrained('''bert-base''' )

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
        with self.assertRaisesRegex(
            UpperCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            UpperCAmelCase_ = TFAutoModel.from_pretrained(UpperCAmelCase__ , revision='''aaaaaa''' )

    def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
        with self.assertRaisesRegex(
            UpperCAmelCase__ ,
            '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
            UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
        with self.assertRaisesRegex(UpperCAmelCase__ , '''Use `from_pt=True` to load this model''' ):
            UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )

    def lowerCAmelCase__ ( self : List[str] ) ->Dict:
        # Make sure we have cached the model.
        UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        with RequestCounter() as counter:
            UpperCAmelCase_ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )

        # With a sharded checkpoint
        UpperCAmelCase_ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
        with RequestCounter() as counter:
            UpperCAmelCase_ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
708
'''simple docstring'''
import re


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , str_ )]


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    UpperCAmelCase_ = split_input(str_ )
    return "".join(
        [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool , _UpperCamelCase : str ):
    '''simple docstring'''
    try:
        UpperCAmelCase_ = split_input(_UpperCamelCase )
        if upper:
            UpperCAmelCase_ = ''''''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            UpperCAmelCase_ = ''''''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    return to_simple_case(_UpperCamelCase )


def __lowerCamelCase ( _UpperCamelCase : str ):
    '''simple docstring'''
    try:
        UpperCAmelCase_ = to_simple_case(_UpperCamelCase )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
    '''simple docstring'''
    return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''_''' )


def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
    '''simple docstring'''
    return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''-''' )


if __name__ == "__main__":
    __import__("doctest").testmod()
43
0
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


lowercase__ : Dict = logging.get_logger(__name__)


class lowerCamelCase ( lowerCamelCase ):
    '''simple docstring'''

    def __init__( self : List[Any] , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] ) ->None:
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' ,
            UpperCAmelCase__ , )
        super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
709
'''simple docstring'''
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    lowercase__ : Optional[Any] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , ):
    '''simple docstring'''
    if attention_mask is None:
        UpperCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        UpperCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        UpperCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class lowerCamelCase :
    '''simple docstring'''

    def __init__( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Optional[int]:
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = seq_length
        UpperCAmelCase_ = is_training
        UpperCAmelCase_ = use_labels
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = hidden_dropout_prob
        UpperCAmelCase_ = attention_probs_dropout_prob
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = eos_token_id
        UpperCAmelCase_ = pad_token_id
        UpperCAmelCase_ = bos_token_id
        UpperCAmelCase_ = initializer_range

    def lowerCAmelCase__ ( self : int ) ->Any:
        UpperCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )

        UpperCAmelCase_ = BlenderbotConfig(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_id=self.eos_token_id ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            initializer_range=self.initializer_range ,
            use_cache=UpperCAmelCase__ , )
        UpperCAmelCase_ = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        return config, inputs_dict

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
        UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) ->Tuple:
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )

        UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )

        UpperCAmelCase_ , UpperCAmelCase_ = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )

        UpperCAmelCase_ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, :-1] ,
            UpperCAmelCase__ ,
            decoder_attention_mask=UpperCAmelCase__ ,
            past_key_values=UpperCAmelCase__ ,
            decoder_position_ids=UpperCAmelCase__ , )

        UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, -1:] ,
            UpperCAmelCase__ ,
            decoder_attention_mask=UpperCAmelCase__ ,
            past_key_values=outputs_cache.past_key_values ,
            decoder_position_ids=UpperCAmelCase__ , )

        UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->Union[str, Any]:
        UpperCAmelCase_ = 20
        UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )

        UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )

        UpperCAmelCase_ , UpperCAmelCase_ = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        UpperCAmelCase_ = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] ,
            axis=-1 , )

        UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
        UpperCAmelCase_ = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, :-1] ,
            UpperCAmelCase__ ,
            decoder_attention_mask=UpperCAmelCase__ ,
            past_key_values=UpperCAmelCase__ ,
            decoder_position_ids=UpperCAmelCase__ , )
        UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase_ = model.decode(
            decoder_input_ids[:, -1:] ,
            UpperCAmelCase__ ,
            past_key_values=outputs_cache.past_key_values ,
            decoder_attention_mask=UpperCAmelCase__ ,
            decoder_position_ids=UpperCAmelCase__ , )

        UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )

        UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )


@require_flax
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase__ = 99

    def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
        UpperCAmelCase_ = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] ,
            dtype=np.intaa , )

        UpperCAmelCase_ = input_ids.shape[0]
        UpperCAmelCase_ = BlenderbotConfig(
            vocab_size=self.vocab_size ,
            d_model=24 ,
            encoder_layers=2 ,
            decoder_layers=2 ,
            encoder_attention_heads=2 ,
            decoder_attention_heads=2 ,
            encoder_ffn_dim=32 ,
            decoder_ffn_dim=32 ,
            max_position_embeddings=48 ,
            eos_token_id=2 ,
            pad_token_id=1 ,
            bos_token_id=0 , )
        return config, input_ids, batch_size

    def lowerCAmelCase__ ( self : Any ) ->str:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_config_and_data()
        UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
        UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ )
        UpperCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : str ) ->int:
        UpperCAmelCase_ = BlenderbotConfig(
            vocab_size=self.vocab_size ,
            d_model=14 ,
            encoder_layers=2 ,
            decoder_layers=2 ,
            encoder_attention_heads=2 ,
            decoder_attention_heads=2 ,
            encoder_ffn_dim=8 ,
            decoder_ffn_dim=8 ,
            max_position_embeddings=48 , )
        UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
        UpperCAmelCase_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        UpperCAmelCase_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
        UpperCAmelCase_ = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
        UpperCAmelCase_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
        UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
        UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )


@require_flax
class lowerCamelCase ( lowerCamelCase , unittest.TestCase , lowerCamelCase ):
    '''simple docstring'''

    lowerCAmelCase__ = True
    lowerCAmelCase__ = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
        UpperCAmelCase_ = FlaxBlenderbotModelTester(self )

    def lowerCAmelCase__ ( self : str ) ->Tuple:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Tuple ) ->str:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )

    def lowerCAmelCase__ ( self : Dict ) ->Tuple:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
                UpperCAmelCase_ = model_class(UpperCAmelCase__ )

                @jax.jit
                def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] ):
                    return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()

                self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
                for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def lowerCAmelCase__ ( self : str ) ->str:
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ = model_class(UpperCAmelCase__ )
                UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )

                UpperCAmelCase_ = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
                    return model.decode(
                        decoder_input_ids=UpperCAmelCase__ ,
                        decoder_attention_mask=UpperCAmelCase__ ,
                        encoder_outputs=UpperCAmelCase__ , )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()

                self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
                for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def lowerCAmelCase__ ( self : int ) ->int:
        for model_class_name in self.all_model_classes:
            UpperCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase_ = model(UpperCAmelCase__ )
            self.assertIsNotNone(UpperCAmelCase__ )

    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
        UpperCAmelCase_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        UpperCAmelCase_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}

        UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCAmelCase__ )
        UpperCAmelCase_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )

        UpperCAmelCase_ = ['''Sam''']
        UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''jax''' )

        UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
        UpperCAmelCase_ = '''Sam is a great name. It means "sun" in Gaelic.'''

        UpperCAmelCase_ = tokenizer.batch_decode(UpperCAmelCase__ , **UpperCAmelCase__ )
        assert generated_txt[0].strip() == tgt_text
43
0
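A minimal sketch (not part of the dataset row above) of the JIT-parity pattern the Flax Blenderbot test exercises: the same pure function is run with jax.jit enabled and disabled, and the outputs are compared. The toy encode below is a stand-in, not the model's encoder.

import jax
import jax.numpy as jnp

@jax.jit
def encode(inputs):
    # any pure function works here; model.encode plays this role in the test
    return jnp.tanh(inputs @ jnp.ones((4, 4)))

inputs = jnp.arange(8.0).reshape(2, 4)
jitted = encode(inputs)
with jax.disable_jit():
    eager = encode(inputs)
assert jitted.shape == eager.shape  # mirrors the per-output shape check in the test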
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() lowercase__ : str = logging.get_logger(__name__) lowercase__ : Optional[int] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } lowercase__ : int = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : List[Any] ): '''simple docstring''' for attribute in key.split('''.''' ): UpperCAmelCase_ = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: UpperCAmelCase_ = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: UpperCAmelCase_ = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase_ = value elif weight_type == "weight_g": UpperCAmelCase_ = value elif weight_type == "weight_v": UpperCAmelCase_ = value elif weight_type == "bias": UpperCAmelCase_ = value else: UpperCAmelCase_ = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = fairseq_model.state_dict() UpperCAmelCase_ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight UpperCAmelCase_ = None for name, value in fairseq_dict.items(): UpperCAmelCase_ = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase_ = True elif name.split('''.''' )[0] == "proj": UpperCAmelCase_ = fairseq_model.proj UpperCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: UpperCAmelCase_ = True if "*" in mapped_key: UpperCAmelCase_ = name.split(_UpperCamelCase )[0].split('''.''' )[-2] UpperCAmelCase_ = mapped_key.replace('''*''' , _UpperCamelCase ) if "weight_g" in name: UpperCAmelCase_ = '''weight_g''' elif "weight_v" in name: UpperCAmelCase_ = '''weight_v''' elif "bias" in name: UpperCAmelCase_ = '''bias''' elif "weight" in name: UpperCAmelCase_ = '''weight''' else: UpperCAmelCase_ = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] ): '''simple docstring''' UpperCAmelCase_ = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase_ = name.split('''.''' ) UpperCAmelCase_ = int(items[0] ) UpperCAmelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) UpperCAmelCase_ = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase_ = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) def __lowerCamelCase ( _UpperCamelCase : Any ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ = emb.weight.shape UpperCAmelCase_ = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase ) UpperCAmelCase_ = emb.weight.data return lin_layer def __lowerCamelCase ( _UpperCamelCase : Dict ): '''simple docstring''' with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: UpperCAmelCase_ = f.readlines() UpperCAmelCase_ = [line.split(''' ''' )[0] for line in lines] UpperCAmelCase_ = len(_UpperCamelCase ) UpperCAmelCase_ = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(_UpperCamelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : int , ): '''simple docstring''' UpperCAmelCase_ = WavaVecaConfig.from_pretrained(_UpperCamelCase ) UpperCAmelCase_ = SpeechaTextaConfig.from_pretrained( _UpperCamelCase , vocab_size=_UpperCamelCase , decoder_layers=_UpperCamelCase , do_stable_layer_norm=_UpperCamelCase ) UpperCAmelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) UpperCAmelCase_ = model[0].eval() # set weights for wav2vec2 encoder UpperCAmelCase_ = WavaVecaModel(_UpperCamelCase ) UpperCAmelCase_ = recursively_load_weights_wavaveca(model.encoder , _UpperCamelCase ) UpperCAmelCase_ = SpeechaTextaForCausalLM(_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_UpperCamelCase ) # set output linear layer unexpected_keys.remove('''embed_out''' ) UpperCAmelCase_ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) UpperCAmelCase_ = SpeechEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase ) UpperCAmelCase_ = False # add projection layer UpperCAmelCase_ = nn.Parameter(projection_layer.weight ) UpperCAmelCase_ = nn.Parameter(projection_layer.bias ) UpperCAmelCase_ = create_vocab_dict(_UpperCamelCase ) with open(os.path.join(_UpperCamelCase , '''vocab.json''' ) , '''w''' ) as fp: json.dump(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = SpeechaTextaTokenizer(os.path.join(_UpperCamelCase , '''vocab.json''' ) ) tokenizer.save_pretrained(_UpperCamelCase ) UpperCAmelCase_ = 
hf_wavavec.config.to_dict() UpperCAmelCase_ = tokenizer.pad_token_id UpperCAmelCase_ = tokenizer.bos_token_id UpperCAmelCase_ = tokenizer.eos_token_id UpperCAmelCase_ = '''speech_to_text_2''' UpperCAmelCase_ = '''wav2vec2''' UpperCAmelCase_ = SpeechEncoderDecoderConfig.from_dict(_UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) feature_extractor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowercase__ : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") lowercase__ : List[str] = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
710
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) lowercase__ : Tuple = pytest.mark.integration @pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] ) def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ): '''simple docstring''' inspect_dataset(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.parametrize('''path''' , ['''accuracy'''] ) def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ): '''simple docstring''' inspect_metric(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = path + '''.py''' assert script_name in os.listdir(_UpperCamelCase ) assert "__pycache__" not in os.listdir(_UpperCamelCase ) @pytest.mark.parametrize( '''path, config_name, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ): '''simple docstring''' with pytest.raises(_UpperCamelCase ): get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase ) @pytest.mark.parametrize( '''path, expected''' , [ ('''squad''', '''plain_text'''), ('''acronym_identification''', '''default'''), ('''lhoestq/squad''', '''plain_text'''), ('''lhoestq/test''', '''default'''), ('''lhoestq/demo1''', '''lhoestq--demo1'''), ('''dalle-mini/wit''', '''dalle-mini--wit'''), ] , ) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = get_dataset_config_names(_UpperCamelCase ) assert expected in config_names @pytest.mark.parametrize( '''path, expected_configs, expected_splits_in_first_config''' , [ ('''squad''', ['''plain_text'''], ['''train''', '''validation''']), ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']), ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase ) assert list(infos.keys() ) == expected_configs UpperCAmelCase_ = expected_configs[0] assert expected_config in infos UpperCAmelCase_ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( '''path, expected_config, expected_splits''' , [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), 
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ] , ) def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : str ): '''simple docstring''' UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase ) assert expected_config in infos UpperCAmelCase_ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''' , [ ('''paws''', None, ValueError), ] , ) def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ): '''simple docstring''' with pytest.raises(_UpperCamelCase ): get_dataset_split_names(_UpperCamelCase , config_name=_UpperCamelCase )
43
0
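The conversion script's set_recursively reduces to a getattr walk over a dotted key followed by a tensor assignment; a self-contained sketch of that step (the module and key below are illustrative, not taken from the script):

import torch
from torch import nn

model = nn.Sequential(nn.Linear(2, 2))
pointer = model
for attr in "0.weight".split("."):  # walk the dotted key, as the script does per checkpoint entry
    pointer = getattr(pointer, attr)
pointer.data = torch.zeros(2, 2)  # copy the source tensor into the target parameter
print(model[0].weight)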
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase__ : Optional[Any] = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str]=False , _UpperCamelCase : Tuple=False ): '''simple docstring''' UpperCAmelCase_ = '''backbone.''' if is_semantic else '''''' UpperCAmelCase_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ (F"""{prefix}cls_token""", '''beit.embeddings.cls_token'''), (F"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''), (F"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''), (F"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Dict=False , _UpperCamelCase : Tuple=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): UpperCAmelCase_ = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" ) UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" ) UpperCAmelCase_ = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ = q_bias UpperCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 
2, : ] UpperCAmelCase_ = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" ) UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" ) UpperCAmelCase_ = gamma_a UpperCAmelCase_ = gamma_a def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ): '''simple docstring''' UpperCAmelCase_ = dct.pop(_UpperCamelCase ) UpperCAmelCase_ = val def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase_ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im @torch.no_grad() def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int=False ): '''simple docstring''' UpperCAmelCase_ = False if '''rvlcdip''' in checkpoint_url else True UpperCAmelCase_ = BeitConfig(use_absolute_position_embeddings=_UpperCamelCase , use_mask_token=_UpperCamelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCAmelCase_ = 1024 UpperCAmelCase_ = 4096 UpperCAmelCase_ = 24 UpperCAmelCase_ = 16 # labels if "rvlcdip" in checkpoint_url: UpperCAmelCase_ = 16 UpperCAmelCase_ = '''huggingface/label-files''' UpperCAmelCase_ = '''rvlcdip-id2label.json''' UpperCAmelCase_ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase_ = {int(_UpperCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCAmelCase_ = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='''cpu''' )['''model'''] UpperCAmelCase_ = create_rename_keys(_UpperCamelCase , has_lm_head=_UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , has_lm_head=_UpperCamelCase ) # load HuggingFace model UpperCAmelCase_ = BeitForMaskedImageModeling(_UpperCamelCase ) if has_lm_head else BeitForImageClassification(_UpperCamelCase ) model.eval() model.load_state_dict(_UpperCamelCase ) # Check outputs on an image UpperCAmelCase_ = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCamelCase ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ) UpperCAmelCase_ = encoding['''pixel_values'''] UpperCAmelCase_ = model(_UpperCamelCase ) UpperCAmelCase_ = outputs.logits # verify logits UpperCAmelCase_ = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(_UpperCamelCase ), "Shape of logits not as expected" Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_UpperCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_UpperCamelCase ) if push_to_hub: if has_lm_head: UpperCAmelCase_ = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCAmelCase_ = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( 
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=_UpperCamelCase , ) model.push_to_hub( repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=_UpperCamelCase , ) if __name__ == "__main__": lowercase__ : Dict = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) lowercase__ : int = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
711
'''simple docstring''' import collections import os import re from pathlib import Path lowercase__ : List[Any] = "src/transformers" # Matches is_xxx_available() lowercase__ : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()") # Catches a one-line _import_struct = {xxx} lowercase__ : Any = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase__ : Union[str, Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]") # Catches a line if not is_foo_available lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") # Catches a line _import_struct["bla"].append("foo") lowercase__ : List[str] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase__ : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") # Catches a line with an object between quotes and a comma: "MyModel", lowercase__ : List[Any] = re.compile(R"^\s+\"([^\"]+)\",") # Catches a line with objects between brackets only: ["foo", "bar"], lowercase__ : Optional[Any] = re.compile(R"^\s+\[([^\]]+)\]") # Catches a line with from foo import bar, bla, boo lowercase__ : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Catches a line with try: lowercase__ : int = re.compile(R"^\s*try:") # Catches a line with else: lowercase__ : Any = re.compile(R"^\s*else:") def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ): '''simple docstring''' if _re_test_backend.search(_UpperCamelCase ) is None: return None UpperCAmelCase_ = [b[0] for b in _re_backend.findall(_UpperCamelCase )] backends.sort() return "_and_".join(_UpperCamelCase ) def __lowerCamelCase ( _UpperCamelCase : int ): '''simple docstring''' with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase_ = f.readlines() UpperCAmelCase_ = 0 while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure UpperCAmelCase_ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCAmelCase_ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_UpperCamelCase ): UpperCAmelCase_ = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0] UpperCAmelCase_ = re.findall(R'''\[([^\]]+)\]''' , _UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCAmelCase_ = _re_import_struct_key_value.search(_UpperCamelCase ) if single_line_import_search is not None: UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCAmelCase_ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
UpperCAmelCase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCAmelCase_ = lines[line_index] if _re_import_struct_add_one.search(_UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None: UpperCAmelCase_ = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(''', ''' ) UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif _re_between_brackets.search(_UpperCamelCase ) is not None: UpperCAmelCase_ = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(''', ''' ) UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0] objects.extend(_UpperCamelCase ) elif _re_quote_object.search(_UpperCamelCase ) is not None: objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 UpperCAmelCase_ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCAmelCase_ = [] while ( line_index < len(_UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCAmelCase_ = lines[line_index] UpperCAmelCase_ = _re_import.search(_UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCAmelCase_ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(_UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCAmelCase_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCAmelCase_ = lines[line_index] UpperCAmelCase_ = _re_import.search(_UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 UpperCAmelCase_ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ): '''simple docstring''' def find_duplicates(_UpperCamelCase : Tuple ): return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCAmelCase_ = [] for key in import_dict_objects.keys(): UpperCAmelCase_ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) UpperCAmelCase_ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCAmelCase_ = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = [] for root, _, files in os.walk(_UpperCamelCase ): if "__init__.py" in files: UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''__init__.py''' ) UpperCAmelCase_ = parse_init(_UpperCamelCase ) if objects is not None: UpperCAmelCase_ = analyze_results(*_UpperCamelCase ) if len(_UpperCamelCase ) > 0: UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(_UpperCamelCase ) ) if len(_UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(_UpperCamelCase ) ) def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = [] for path, directories, files in os.walk(_UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(_UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCAmelCase_ = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) ) UpperCAmelCase_ = short_path.replace(os.path.sep , '''.''' ) submodules.append(_UpperCamelCase ) for fname in files: if fname == "__init__.py": continue UpperCAmelCase_ = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) ) UpperCAmelCase_ = 
short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(_UpperCamelCase ) return submodules lowercase__ : Union[str, Any] = [ "convert_pytorch_checkpoint_to_tf2", "modeling_flax_pytorch_utils", "models.esm.openfold_utils", ] def __lowerCamelCase ( ): '''simple docstring''' from transformers.utils import direct_transformers_import UpperCAmelCase_ = direct_transformers_import(_UpperCamelCase ) UpperCAmelCase_ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(_UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: UpperCAmelCase_ = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _UpperCamelCase ) ) ) UpperCAmelCase_ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_UpperCamelCase ) > 0: UpperCAmelCase_ = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
43
0
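find_backend in the init checker pairs the two regexes defined at the top of the script; a quick illustration of how a guarded import line maps to a backend name:

import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

line = "if not is_torch_available():"
if _re_test_backend.search(line) is not None:
    backends = sorted(match[0] for match in _re_backend.findall(line))
    print("_and_".join(backends))  # -> torch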
'''simple docstring''' from __future__ import annotations def __lowerCamelCase ( _UpperCamelCase : list[int] ): # This function is recursive '''simple docstring''' UpperCAmelCase_ = len(_UpperCamelCase ) # If the array contains only one element, we return it (it's the stop condition of # recursion) if array_length <= 1: return array # Else UpperCAmelCase_ = array[0] UpperCAmelCase_ = False UpperCAmelCase_ = 1 UpperCAmelCase_ = [] while not is_found and i < array_length: if array[i] < pivot: UpperCAmelCase_ = True UpperCAmelCase_ = [element for element in array[i:] if element >= array[i]] UpperCAmelCase_ = longest_subsequence(_UpperCamelCase ) if len(_UpperCamelCase ) > len(_UpperCamelCase ): UpperCAmelCase_ = temp_array else: i += 1 UpperCAmelCase_ = [element for element in array[1:] if element >= pivot] UpperCAmelCase_ = [pivot, *longest_subsequence(_UpperCamelCase )] if len(_UpperCamelCase ) > len(_UpperCamelCase ): return temp_array else: return longest_subseq if __name__ == "__main__": import doctest doctest.testmod()
712
'''simple docstring''' from __future__ import annotations def __lowerCamelCase ( _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ = position UpperCAmelCase_ = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] UpperCAmelCase_ = [] for position in positions: UpperCAmelCase_ , UpperCAmelCase_ = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_UpperCamelCase ) return permissible_positions def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ): '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def __lowerCamelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ): '''simple docstring''' if is_complete(_UpperCamelCase ): return True for position in get_valid_pos(_UpperCamelCase , len(_UpperCamelCase ) ): UpperCAmelCase_ , UpperCAmelCase_ = position if board[y][x] == 0: UpperCAmelCase_ = curr + 1 if open_knight_tour_helper(_UpperCamelCase , _UpperCamelCase , curr + 1 ): return True UpperCAmelCase_ = 0 return False def __lowerCamelCase ( _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): UpperCAmelCase_ = 1 if open_knight_tour_helper(_UpperCamelCase , (i, j) , 1 ): return board UpperCAmelCase_ = 0 UpperCAmelCase_ = F"""Open Knight Tour cannot be performed on a board of size {n}""" raise ValueError(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
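Move generation in the knight's tour sample is just eight fixed offsets filtered against the board bounds; a standalone restatement:

def knight_moves(position, n):
    y, x = position
    deltas = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    return [(y + dy, x + dx) for dy, dx in deltas if 0 <= y + dy < n and 0 <= x + dx < n]

print(knight_moves((0, 0), 5))  # a corner square has only two onward moves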
'''simple docstring''' from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class lowerCamelCase ( lowerCamelCase , lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''pixel_values''' lowerCAmelCase__ = False lowerCAmelCase__ = TimmBackboneConfig def __init__( self : Optional[Any] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Any ) ->List[Any]: requires_backends(self , '''timm''' ) super().__init__(UpperCAmelCase__ ) UpperCAmelCase_ = config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(UpperCAmelCase__ , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) UpperCAmelCase_ = getattr(UpperCAmelCase__ , '''use_pretrained_backbone''' , UpperCAmelCase__ ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. UpperCAmelCase_ = config.out_indices if getattr(UpperCAmelCase__ , '''out_indices''' , UpperCAmelCase__ ) is not None else (-1,) UpperCAmelCase_ = timm.create_model( config.backbone , pretrained=UpperCAmelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase__ , **UpperCAmelCase__ , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
UpperCAmelCase_ = self._backbone.return_layers UpperCAmelCase_ = {layer['''module''']: str(UpperCAmelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(UpperCAmelCase__ ) @classmethod def lowerCAmelCase__ ( cls : List[str] , UpperCAmelCase__ : Optional[int] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any] ) ->Union[str, Any]: requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig UpperCAmelCase_ = kwargs.pop('''config''' , TimmBackboneConfig() ) UpperCAmelCase_ = kwargs.pop('''use_timm_backbone''' , UpperCAmelCase__ ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) UpperCAmelCase_ = kwargs.pop('''num_channels''' , config.num_channels ) UpperCAmelCase_ = kwargs.pop('''features_only''' , config.features_only ) UpperCAmelCase_ = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) UpperCAmelCase_ = kwargs.pop('''out_indices''' , config.out_indices ) UpperCAmelCase_ = TimmBackboneConfig( backbone=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , features_only=UpperCAmelCase__ , use_pretrained_backbone=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , ) return super()._from_config(UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : int ) ->List[str]: pass def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : List[str] ) ->Union[BackboneOutput, Tuple[Tensor, ...]]: UpperCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_ = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone UpperCAmelCase_ = self._all_layers UpperCAmelCase_ = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = self._return_layers UpperCAmelCase_ = tuple(hidden_states[i] for i in self.out_indices ) else: UpperCAmelCase_ = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = None UpperCAmelCase_ = tuple(UpperCAmelCase__ ) UpperCAmelCase_ = tuple(UpperCAmelCase__ ) if hidden_states is not None else None if not return_dict: UpperCAmelCase_ = (feature_maps,) if output_hidden_states: UpperCAmelCase_ = output + (hidden_states,) return output return BackboneOutput(feature_maps=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ , attentions=UpperCAmelCase__ )
713
'''simple docstring''' from __future__ import annotations from typing import TypedDict class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError('''The parameter s type must be str.''' ) return [s[i:] + s[:i] for i in range(len(_UpperCamelCase ) )] def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError('''The parameter s type must be str.''' ) if not s: raise ValueError('''The parameter s must not be empty.''' ) UpperCAmelCase_ = all_rotations(_UpperCamelCase ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation UpperCAmelCase_ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(_UpperCamelCase ), } return response def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int ): '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError('''The parameter bwt_string type must be str.''' ) if not bwt_string: raise ValueError('''The parameter bwt_string must not be empty.''' ) try: UpperCAmelCase_ = int(_UpperCamelCase ) except ValueError: raise TypeError( '''The parameter idx_original_string type must be int or''' ''' castable to int.''' ) if idx_original_string < 0: raise ValueError('''The parameter idx_original_string must not be lower than 0.''' ) if idx_original_string >= len(_UpperCamelCase ): raise ValueError( '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' ) UpperCAmelCase_ = [''''''] * len(_UpperCamelCase ) for _ in range(len(_UpperCamelCase ) ): for i in range(len(_UpperCamelCase ) ): UpperCAmelCase_ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowercase__ : Optional[int] = "Provide a string that I will generate its BWT transform: " lowercase__ : List[Any] = input(entry_msg).strip() lowercase__ : Any = bwt_transform(s) print( F'''Burrows Wheeler transform for string \'{s}\' results ''' F'''in \'{result['bwt_string']}\'''' ) lowercase__ : Dict = reverse_bwt(result["bwt_string"], result["idx_original_string"]) print( F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' ''' F'''we get original string \'{original_string}\'''' )
43
0
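A compact restatement of the forward Burrows-Wheeler transform implemented above, useful for checking expected values by hand (assumes a non-empty input string):

def bwt(s):
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(rotation[-1] for rotation in rotations), rotations.index(s)

print(bwt("banana"))  # ('nnbaaa', 3)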
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : int = logging.get_logger(__name__) lowercase__ : List[str] = { "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''time_series_transformer''' lowerCAmelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]: # time series specific configuration UpperCAmelCase_ = prediction_length UpperCAmelCase_ = context_length or prediction_length UpperCAmelCase_ = distribution_output UpperCAmelCase_ = loss UpperCAmelCase_ = input_size UpperCAmelCase_ = num_time_features UpperCAmelCase_ = lags_sequence UpperCAmelCase_ = scaling UpperCAmelCase_ = num_dynamic_real_features UpperCAmelCase_ = num_static_real_features UpperCAmelCase_ = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) UpperCAmelCase_ = cardinality else: UpperCAmelCase_ = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) UpperCAmelCase_ = embedding_dimension else: UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] UpperCAmelCase_ = num_parallel_samples # Transformer architecture configuration UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features UpperCAmelCase_ = d_model UpperCAmelCase_ = encoder_attention_heads UpperCAmelCase_ = decoder_attention_heads UpperCAmelCase_ = encoder_ffn_dim UpperCAmelCase_ = decoder_ffn_dim UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = decoder_layers UpperCAmelCase_ = dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = encoder_layerdrop 
UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = activation_function UpperCAmelCase_ = init_std UpperCAmelCase_ = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ ) @property def lowerCAmelCase__ ( self : List[str] ) ->int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
714
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase__ : Union[str, Any] = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Union[str, Any] = ["MobileViTFeatureExtractor"] lowercase__ : List[Any] = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Dict = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys lowercase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
0
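The _LazyModule used by the mobilevit init defers every import until first attribute access; a minimal sketch of the idea (a simplified illustration, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # called only when normal lookup fails, i.e. before the first real import
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"lazy": True}))  # json is imported only on this first access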
'''simple docstring''' import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = XLMTokenizer lowerCAmelCase__ = False def lowerCAmelCase__ ( self : int ) ->Union[str, Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]: UpperCAmelCase_ = '''lower newer''' UpperCAmelCase_ = '''lower newer''' return input_text, output_text def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase_ = '''lower''' UpperCAmelCase_ = ['''low''', '''er</w>'''] UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCAmelCase_ = tokens + ['''<unk>'''] UpperCAmelCase_ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ ) @slow def lowerCAmelCase__ ( self : Any ) ->str: UpperCAmelCase_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' ) UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
715
'''simple docstring''' lowercase__ : Union[str, Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" lowercase__ : str = [{"type": "code", "content": INSTALL_CONTENT}] lowercase__ : Any = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
43
0
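The tokenizer test above writes a toy vocab plus a ranked merges file ('l o', 'lo w', 'e r</w>') and expects 'lower' to tokenize as ['low', 'er</w>']. The following self-contained sketch shows the BPE merge loop that produces that result, using a toy merge table rather than XLM's real one.

# Toy BPE merge loop: repeatedly apply the best-ranked adjacent merge.
def bpe(word: str, merges: dict) -> list:
    # Split into characters, marking the last one as word-final.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: merges.get(p, float("inf")))
        if best not in merges:
            break  # no applicable merge left
        i = pairs.index(best)
        symbols[i : i + 2] = [best[0] + best[1]]
    return symbols


# Ranks mirror the order of the merges file above: "l o", "lo w", "e r</w>".
toy_merges = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
assert bpe("lower", toy_merges) == ["low", "er</w>"]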
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase__ : List[str] = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int=False ): '''simple docstring''' UpperCAmelCase_ = [] # fmt: off # stem: rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') ) rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') ) rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') ) # backbone rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') ) rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", 
F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") ) rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCAmelCase_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) # fmt: on return rename_keys def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ = '''''' else: UpperCAmelCase_ = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ = in_proj_bias[: config.hidden_size] UpperCAmelCase_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
UpperCAmelCase_ = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ): '''simple docstring''' UpperCAmelCase_ = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(_UpperCamelCase , _UpperCamelCase ) def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any ): '''simple docstring''' UpperCAmelCase_ = dct.pop(_UpperCamelCase ) UpperCAmelCase_ = val def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase_ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im @torch.no_grad() def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : int=False ): '''simple docstring''' UpperCAmelCase_ = BitConfig( global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_UpperCamelCase , ) UpperCAmelCase_ = ViTHybridConfig(backbone_config=_UpperCamelCase , image_size=384 , num_labels=1000 ) UpperCAmelCase_ = False # load original model from timm UpperCAmelCase_ = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ = timm_model.state_dict() if base_model: remove_classification_head_(_UpperCamelCase ) UpperCAmelCase_ = create_rename_keys(_UpperCamelCase , _UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = '''huggingface/label-files''' UpperCAmelCase_ = '''imagenet-1k-id2label.json''' UpperCAmelCase_ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase_ = {int(_UpperCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": UpperCAmelCase_ = ViTHybridModel(_UpperCamelCase ).eval() else: UpperCAmelCase_ = ViTHybridForImageClassification(_UpperCamelCase ).eval() model.load_state_dict(_UpperCamelCase ) # create image processor UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=_UpperCamelCase ) ) UpperCAmelCase_ = transform.transforms UpperCAmelCase_ = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } UpperCAmelCase_ = ViTHybridImageProcessor( do_resize=_UpperCamelCase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_UpperCamelCase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_UpperCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = transform(_UpperCamelCase ).unsqueeze(0 ) UpperCAmelCase_ = processor(_UpperCamelCase , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(_UpperCamelCase , _UpperCamelCase ) # verify logits with torch.no_grad(): UpperCAmelCase_ = model(_UpperCamelCase ) UpperCAmelCase_ = outputs.logits print('''Predicted class:''' , logits.argmax(-1 ).item() ) if 
base_model: UpperCAmelCase_ = timm_model.forward_features(_UpperCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_UpperCamelCase , outputs.pooler_output , atol=1E-3 ) else: UpperCAmelCase_ = timm_model(_UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_UpperCamelCase , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_UpperCamelCase ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_UpperCamelCase ) if push_to_hub: print(F"""Pushing model and processor to the hub {vit_name}""" ) model.push_to_hub(F"""ybelkada/{vit_name}""" ) processor.push_to_hub(F"""ybelkada/{vit_name}""" ) if __name__ == "__main__": lowercase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_r50_s16_384", type=str, help="Name of the hybrid ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) lowercase__ : List[Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
716
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


lowercase__ : Optional[Any] = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ : List[str] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
43
0
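The conversion script earlier in this section builds a list of (src, dest) pairs and then pops each old checkpoint key and reinserts it under the new name. A minimal sketch of that pop-and-reinsert pattern on a plain dict follows; the key names here are hypothetical stand-ins for the timm-to-HF names, and no torch dependency is needed.

# Sketch of the rename-keys pattern used by the conversion scripts above.
def rename_state_dict(state_dict: dict, rename_pairs: list) -> dict:
    for src, dest in rename_pairs:
        if src in state_dict:
            # pop + reinsert keeps the tensor value, only the key changes
            state_dict[dest] = state_dict.pop(src)
    return state_dict


sd = {"patch_embed.proj.weight": 1, "cls_token": 2}
pairs = [
    ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
    ("cls_token", "vit.embeddings.cls_token"),
]
print(rename_state_dict(sd, pairs))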
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer lowercase__ : Optional[int] = "bart" lowercase__ : int = True @st.cache(allow_output_mutation=_UpperCamelCase ) def __lowerCamelCase ( ): '''simple docstring''' if LOAD_DENSE_INDEX: UpperCAmelCase_ = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) UpperCAmelCase_ = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) UpperCAmelCase_ = qar_model.eval() else: UpperCAmelCase_ , UpperCAmelCase_ = (None, None) if MODEL_TYPE == "bart": UpperCAmelCase_ = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) UpperCAmelCase_ = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) UpperCAmelCase_ = sas_model.eval() else: UpperCAmelCase_ , UpperCAmelCase_ = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_UpperCamelCase ) def __lowerCamelCase ( ): '''simple docstring''' if LOAD_DENSE_INDEX: UpperCAmelCase_ = faiss.StandardGpuResources() UpperCAmelCase_ = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] UpperCAmelCase_ = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) UpperCAmelCase_ = faiss.IndexFlatIP(128 ) UpperCAmelCase_ = faiss.index_cpu_to_gpu(_UpperCamelCase , 1 , _UpperCamelCase ) wikiaab_gpu_index_flat.add(_UpperCamelCase ) # TODO fix for larger GPU else: UpperCAmelCase_ , UpperCAmelCase_ = (None, None) UpperCAmelCase_ = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_UpperCamelCase ) def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) UpperCAmelCase_ = elia['''train_eli5'''] UpperCAmelCase_ = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) UpperCAmelCase_ = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_UpperCamelCase ) return (elia_train, eli5_train_q_index) lowercase__ : List[Any] = load_indexes() lowercase__ : Dict = load_models() lowercase__ : List[Any] = load_train_data() def __lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int=10 ): '''simple docstring''' UpperCAmelCase_ = embed_questions_for_retrieval([question] , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ = eli5_train_q_index.search(_UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ = [elia_train[int(_UpperCamelCase )] for i in I[0]] return nn_examples def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any]="wiki40b" , _UpperCamelCase : int="dense" , _UpperCamelCase : List[Any]=10 ): '''simple docstring''' if source == "none": UpperCAmelCase_ , UpperCAmelCase_ = (''' <P> '''.join(['''''' for _ in range(11 )] 
).strip(), []) else: if method == "dense": UpperCAmelCase_ , UpperCAmelCase_ = query_qa_dense_index( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: UpperCAmelCase_ , UpperCAmelCase_ = query_es_index( _UpperCamelCase , _UpperCamelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCamelCase , ) UpperCAmelCase_ = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] UpperCAmelCase_ = '''question: {} context: {}'''.format(_UpperCamelCase , _UpperCamelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _UpperCamelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCamelCase : None), } ) def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int]=64 , _UpperCamelCase : Dict=256 , _UpperCamelCase : Dict=False , _UpperCamelCase : Optional[int]=2 , _UpperCamelCase : Any=0.95 , _UpperCamelCase : Tuple=0.8 ): '''simple docstring''' with torch.no_grad(): UpperCAmelCase_ = qa_sas_generate( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , num_answers=1 , num_beams=_UpperCamelCase , min_len=_UpperCamelCase , max_len=_UpperCamelCase , do_sample=_UpperCamelCase , temp=_UpperCamelCase , top_p=_UpperCamelCase , top_k=_UpperCamelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar lowercase__ : str = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" lowercase__ : List[Any] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia lowercase__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) lowercase__ : Optional[Any] = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] lowercase__ : Optional[Any] = st.sidebar.checkbox("Demo options") if demo_options: lowercase__ : int = st.sidebar.selectbox( "", action_list, index=3, ) lowercase__ : Tuple = action_list.index(action_st) lowercase__ : Union[str, Any] = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) lowercase__ : Union[str, Any] = show_type == "Show full text of passages" else: lowercase__ : Union[str, Any] = 3 lowercase__ : List[Any] = True lowercase__ : List[str] = st.sidebar.checkbox("Retrieval options") if retrieval_options: lowercase__ : str = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the 
[ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) lowercase__ : str = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) lowercase__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: lowercase__ : Optional[int] = "wiki40b" lowercase__ : str = "dense" lowercase__ : Optional[Any] = "beam" lowercase__ : Tuple = 2 lowercase__ : Optional[Any] = 64 lowercase__ : Any = 256 lowercase__ : str = None lowercase__ : Union[str, Any] = None lowercase__ : List[Any] = st.sidebar.checkbox("Generation options") if generate_options: lowercase__ : Union[str, Any] = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) lowercase__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) lowercase__ : Tuple = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) lowercase__ : Union[str, Any] = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": lowercase__ : Optional[Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: lowercase__ : Optional[int] = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) lowercase__ : int = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) lowercase__ : List[Any] = None # start main text lowercase__ : Tuple = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] lowercase__ : int = st.selectbox( "What would you like to ask? 
---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": lowercase__ : Dict = st.text_input("Enter your question here:", "") else: lowercase__ : Union[str, Any] = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": lowercase__ : Any = make_support(question, source=wiki_source, method="dense", n_results=10) lowercase__ : int = make_support(question, source=wiki_source, method="sparse", n_results=10) lowercase__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] lowercase__ : List[str] = support_list[:10] lowercase__ : Any = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: lowercase__ : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: lowercase__ : List[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): lowercase__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) lowercase__ : List[Any] = res[1].strip() if sec_titles == "": lowercase__ : str = "[{}]({})".format(res[0], wiki_url) else: lowercase__ : Union[str, Any] = sec_titles.split(" & ") lowercase__ : Optional[int] = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: lowercase__ : Union[str, Any] = find_nearest_training(question) lowercase__ : Dict = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) lowercase__ : List[Any] = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) lowercase__ : Dict = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
717
'''simple docstring'''

from heapq import heappop, heappush

import numpy as np


def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : bool , ):
    '''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ = grid.shape
    UpperCAmelCase_ = [-1, 1, 0, 0]
    UpperCAmelCase_ = [0, 0, -1, 1]

    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    UpperCAmelCase_ , UpperCAmelCase_ = [(0, source)], set()
    UpperCAmelCase_ = np.full((rows, cols) , np.inf )
    UpperCAmelCase_ = 0
    UpperCAmelCase_ = np.empty((rows, cols) , dtype=_UpperCamelCase )
    UpperCAmelCase_ = None

    while queue:
        ((UpperCAmelCase_) , (UpperCAmelCase_)) = heappop(_UpperCamelCase )
        if (x, y) in visited:
            continue
        visited.add((x, y) )

        if (x, y) == destination:
            UpperCAmelCase_ = []
            while (x, y) != source:
                path.append((x, y) )
                UpperCAmelCase_ , UpperCAmelCase_ = predecessors[x, y]
            path.append(_UpperCamelCase )  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(_UpperCamelCase ) ):
            UpperCAmelCase_ , UpperCAmelCase_ = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                UpperCAmelCase_ = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(_UpperCamelCase , (dist + 1, (nx, ny)) )
                    UpperCAmelCase_ = dist + 1
                    UpperCAmelCase_ = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
43
0
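The grid-Dijkstra sample above is hard to follow through the obfuscated variable names, so here is a compact, runnable restatement on a plain list-of-lists (no numpy), keeping the original convention that 1 marks a walkable cell and every step costs 1.

from heapq import heappop, heappush


def grid_dijkstra(grid, source, destination):
    """Unit-cost Dijkstra on a 0/1 grid; returns (distance, path) or (inf, [])."""
    rows, cols = len(grid), len(grid[0])
    dist = {source: 0}
    prev = {}
    queue = [(0, source)]
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) == destination:
            # walk the predecessor chain back to the source
            path, node = [], destination
            while node != source:
                path.append(node)
                node = prev[node]
            path.append(source)
            return d, path[::-1]
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1:
                if d + 1 < dist.get((nx, ny), float("inf")):
                    dist[(nx, ny)] = d + 1
                    prev[(nx, ny)] = (x, y)
                    heappush(queue, (d + 1, (nx, ny)))
    return float("inf"), []


print(grid_dijkstra([[1, 1], [1, 1]], (0, 0), (1, 1)))  # -> (2, [(0, 0), (0, 1), (1, 1)])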
'''simple docstring'''

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def __lowerCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase_ = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=_UpperCamelCase )
    UpperCAmelCase_ = parser.add_subparsers(help='''accelerate command helpers''' )

    # Register commands
    get_config_parser(subparsers=_UpperCamelCase )
    env_command_parser(subparsers=_UpperCamelCase )
    launch_command_parser(subparsers=_UpperCamelCase )
    tpu_command_parser(subparsers=_UpperCamelCase )
    test_command_parser(subparsers=_UpperCamelCase )

    # Let's go
    UpperCAmelCase_ = parser.parse_args()

    if not hasattr(_UpperCamelCase , '''func''' ):
        parser.print_help()
        exit(1 )

    # Run
    args.func(_UpperCamelCase )


if __name__ == "__main__":
    main()
718
'''simple docstring'''

import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''

    lowerCAmelCase__ = XLMTokenizer
    lowerCAmelCase__ = False

    def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]:
        UpperCAmelCase_ = '''lower newer'''
        UpperCAmelCase_ = '''lower newer'''
        return input_text, output_text

    def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
        UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file )

        UpperCAmelCase_ = '''lower'''
        UpperCAmelCase_ = ['''low''', '''er</w>''']
        UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        UpperCAmelCase_ = tokens + ['''<unk>''']
        UpperCAmelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self : Any ) ->str:
        UpperCAmelCase_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )

        UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )

        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
43
0
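The CLI sample above follows argparse's standard subcommand-dispatch pattern: each subcommand registers its own parser and attaches a `func` default, which `main` looks up and calls after parsing. A minimal runnable sketch with one hypothetical `env` subcommand:

import argparse


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser("demo", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    # Each sub-command registers itself and attaches a `func` default.
    env = subparsers.add_parser("env", help="print environment info")
    env.set_defaults(func=lambda args: print("env info"))
    return parser


def main() -> None:
    parser = build_parser()
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # no subcommand given: show help and exit non-zero
        parser.print_help()
        raise SystemExit(1)
    args.func(args)


if __name__ == "__main__":
    main()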
'''simple docstring'''


def solution(_UpperCamelCase : int = 600_851_475_143 ):
    '''simple docstring'''
    try:
        n = int(_UpperCamelCase )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    largest_factor = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            largest_factor = i
            n //= i
        i += 1
    if n > 1:
        largest_factor = n
    return int(largest_factor )


if __name__ == "__main__":
    print(F'''{solution() = }''')
719
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ): '''simple docstring''' for param in module.parameters(): UpperCAmelCase_ = False def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): UpperCAmelCase_ = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ = plt.imshow(_UpperCamelCase ) fig.axes.get_xaxis().set_visible(_UpperCamelCase ) fig.axes.get_yaxis().set_visible(_UpperCamelCase ) plt.show() def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = datetime.now() UpperCAmelCase_ = current_time.strftime('''%H:%M:%S''' ) return timestamp
43
0
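A quick sanity check on the trial-division idea in the sample above: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29, and the Project Euler default 600_851_475_143 factors down to 6857. The self-contained restatement below verifies both.

# Trial-division largest prime factor, with worked checks.
def largest_prime_factor(n: int) -> int:
    i, largest = 2, 1
    while i * i <= n:
        while n % i == 0:
            largest = i
            n //= i
        i += 1
    # whatever remains above 1 is itself prime and the largest factor
    return n if n > 1 else largest


assert largest_prime_factor(13195) == 29
assert largest_prime_factor(600_851_475_143) == 6857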
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Dict ) ->List[str]: UpperCAmelCase_ = tf.convert_to_tensor( [ [ 8.222_0991, # 3rd highest value; idx. 0 -0.562_0044, 5.2322_9752, 4.038_6393, -6.879_8378, -0.5478_5802, -3.201_2153, 2.9277_7176, 1.8817_1953, 7.3534_1276, # 5th highest value; idx. 9 8.4320_7833, # 2nd highest value; idx. 10 -9.8571_1836, -5.9620_9236, -1.1303_9161, -7.111_5294, -0.836_9633, -5.318_6408, 7.0642_7407, 0.8136_9344, -0.8202_3817, -5.917_9796, 0.5881_3443, -6.9977_8438, 4.7155_1189, -0.1877_1637, 7.4402_0759, # 4th highest value; idx. 25 9.3845_0987, # 1st highest value; idx. 26 2.1266_2941, -9.3256_2038, 2.3565_2522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5842_5518, 4.5313_9238, -5.5751_0464, -6.2803_0699, -7.1952_9503, -4.0212_2551, 1.3933_7037, -6.0670_7057, 1.5948_0517, -9.64_3119, 0.0390_7799, 0.6723_1762, -8.8820_6726, 6.2711_5922, # 4th highest value; idx. 13 2.2852_0723, 4.8276_7506, 4.3042_1368, 8.827_5313, # 2nd highest value; idx. 17 5.4402_9958, # 5th highest value; idx. 18 -4.473_5794, 7.3857_9536, # 3rd highest value; idx. 20 -2.9105_1663, 2.6194_6077, -2.567_4762, -9.4895_9302, -4.0292_2645, -1.3541_6918, 9.6770_2323, # 1st highest value; idx. 
27 -5.8947_8553, 1.8537_0467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) UpperCAmelCase_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above UpperCAmelCase_ = tf.convert_to_tensor( [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above UpperCAmelCase_ = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) UpperCAmelCase_ = output[output != -float('''inf''' )] UpperCAmelCase_ = tf.cast( tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @require_tf class lowerCamelCase ( unittest.TestCase , lowerCamelCase ): '''simple docstring''' if is_tf_available(): lowerCAmelCase__ = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]: # TF-only test: tf.saved_model export UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 2 UpperCAmelCase_ = 2 class lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase__ : List[str] ) ->Dict: super(UpperCAmelCase__ , self ).__init__() UpperCAmelCase_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->int: UpperCAmelCase_ = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} UpperCAmelCase_ = [[2, 0], [102, 103]] UpperCAmelCase_ = [[1, 0], [1, 1]] UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} ) UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default'''] for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ): UpperCAmelCase_ = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences'''] UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]: # TF-only test: tf.saved_model export UpperCAmelCase_ = 
TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 1 UpperCAmelCase_ = 2 class lowerCamelCase ( tf.Module ): '''simple docstring''' def __init__( self : int , UpperCAmelCase__ : int ) ->List[str]: super(UpperCAmelCase__ , self ).__init__() UpperCAmelCase_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCAmelCase__ , ) def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->int: UpperCAmelCase_ = self.model.generate( input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , ) return {"sequences": outputs["sequences"]} UpperCAmelCase_ = [[2], [102, 103]] UpperCAmelCase_ = [[1], [1, 1]] UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} ) UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default'''] for input_row in range(len(UpperCAmelCase__ ) ): UpperCAmelCase_ = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences'''] UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ ) tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ ) @slow @require_tensorflow_text def lowerCAmelCase__ ( self : Optional[Any] ) ->int: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase__ ) class lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] ) ->Any: super().__init__() UpperCAmelCase_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , '''spiece.model''' ) , '''rb''' ).read() ) UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]: UpperCAmelCase_ = self.tokenizer.tokenize(UpperCAmelCase__ ) UpperCAmelCase_ , UpperCAmelCase_ = text.pad_model_inputs( UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) UpperCAmelCase_ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ) return self.tokenizer.detokenize(UpperCAmelCase__ ) UpperCAmelCase_ = CompleteSentenceTransformer() UpperCAmelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) UpperCAmelCase_ = complete_model(UpperCAmelCase__ ) UpperCAmelCase_ = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ ) keras_model.save(UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple: # Has PT equivalent: this test relies on random sampling UpperCAmelCase_ = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } UpperCAmelCase_ = 14 UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = '''Hello, my dog 
is cute and''' UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ) UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) UpperCAmelCase_ = [638, 198] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]: # Has PT equivalent: ample use of framework-specific code UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = '''Hugging Face is a technology company based in New York and Paris.''' UpperCAmelCase_ = bart_tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ).input_ids UpperCAmelCase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy() class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) ->List[str]: return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) ) class lowerCamelCase ( bart_model.model.encoder.__class__ ): '''simple docstring''' def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->Any: return super().call(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared ) UpperCAmelCase_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy() with self.assertRaises(UpperCAmelCase__ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCAmelCase__ , foo='''bar''' )
720
'''simple docstring'''

import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    '''simple docstring'''

    def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase__ ( self : str ) ->List[str]:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        sd_pipe.set_scheduler('''sample_euler''' )

        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        UpperCAmelCase_ = output.images

        UpperCAmelCase_ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCAmelCase__ ( self : List[str] ) ->int:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        sd_pipe.set_scheduler('''sample_euler''' )

        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        UpperCAmelCase_ = output.images

        UpperCAmelCase_ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
        UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )

        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )

        UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
        UpperCAmelCase_ = torch.manual_seed(0 )
        UpperCAmelCase_ = sd_pipe(
            [prompt] ,
            generator=UpperCAmelCase__ ,
            guidance_scale=7.5 ,
            num_inference_steps=15 ,
            output_type='''np''' ,
            use_karras_sigmas=UpperCAmelCase__ ,
        )

        UpperCAmelCase_ = output.images

        UpperCAmelCase_ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
43
0
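The TF generation tests earlier exercise `tf_top_k_top_p_filtering`, which masks logits outside the top-k set and outside the top-p nucleus with -inf. Below is a simplified NumPy sketch of the same idea; it omits `min_tokens_to_keep` and batching, so it is an illustration of the technique, not a drop-in replacement.

import numpy as np


def top_k_top_p_filter(logits: np.ndarray, top_k: int, top_p: float) -> np.ndarray:
    """Mask 1D logits outside top-k and outside the top-p nucleus with -inf."""
    out = logits.copy()
    # Top-k: keep only the k largest logits.
    if top_k > 0:
        kth = np.sort(out)[-top_k]
        out[out < kth] = -np.inf
    # Top-p: keep the smallest prefix of sorted tokens whose softmax mass reaches top_p.
    order = np.argsort(out)[::-1]
    probs = np.exp(out[order] - np.max(out))
    probs /= probs.sum()
    cutoff = np.searchsorted(np.cumsum(probs), top_p) + 1  # always keep >= 1 token
    out[order[cutoff:]] = -np.inf
    return out


print(top_k_top_p_filter(np.array([9.0, 8.0, 1.0, 0.0]), top_k=3, top_p=0.9))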
'''simple docstring''' import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowercase__ : str = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : str ): '''simple docstring''' UpperCAmelCase_ = SwinConfig.from_pretrained( '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) UpperCAmelCase_ = MaskFormerConfig(backbone_config=_UpperCamelCase ) UpperCAmelCase_ = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok UpperCAmelCase_ = 847 UpperCAmelCase_ = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok UpperCAmelCase_ = 150 UpperCAmelCase_ = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok UpperCAmelCase_ = 171 UpperCAmelCase_ = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO UpperCAmelCase_ = 133 UpperCAmelCase_ = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok UpperCAmelCase_ = 19 UpperCAmelCase_ = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok UpperCAmelCase_ = 65 UpperCAmelCase_ = '''mapillary-vistas-id2label.json''' UpperCAmelCase_ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase_ = {int(_UpperCamelCase ): v for k, v in idalabel.items()} return config def __lowerCamelCase ( _UpperCamelCase : Any ): '''simple docstring''' UpperCAmelCase_ = [] # stem # fmt: off rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", 
F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') ) rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') ) # Transformer decoder for idx in 
range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') ) # heads on top rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') ) rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') ) 
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') ) for i in range(3 ): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ): '''simple docstring''' UpperCAmelCase_ = dct.pop(_UpperCamelCase ) UpperCAmelCase_ = val def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): UpperCAmelCase_ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) UpperCAmelCase_ = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:dim, :] UpperCAmelCase_ = in_proj_bias[: dim] UpperCAmelCase_ = in_proj_weight[ dim : dim * 2, : ] UpperCAmelCase_ = in_proj_bias[ dim : dim * 2 ] UpperCAmelCase_ = in_proj_weight[ -dim :, : ] UpperCAmelCase_ = in_proj_bias[-dim :] # fmt: on def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int ): '''simple docstring''' UpperCAmelCase_ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[: hidden_size, :] UpperCAmelCase_ = in_proj_bias[:config.hidden_size] UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :] UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase_ = in_proj_weight[-hidden_size :, :] UpperCAmelCase_ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[: hidden_size, :] UpperCAmelCase_ = in_proj_bias[:config.hidden_size] UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :] UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2] UpperCAmelCase_ = in_proj_weight[-hidden_size :, :] UpperCAmelCase_ = in_proj_bias[-hidden_size :] # fmt: on def __lowerCamelCase ( ): '''simple docstring''' UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase_ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im 
@torch.no_grad() def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : bool = False ): '''simple docstring''' UpperCAmelCase_ = get_maskformer_config(_UpperCamelCase ) # load original state_dict with open(_UpperCamelCase , '''rb''' ) as f: UpperCAmelCase_ = pickle.load(_UpperCamelCase ) UpperCAmelCase_ = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys UpperCAmelCase_ = create_rename_keys(_UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) read_in_swin_q_k_v(_UpperCamelCase , config.backbone_config ) read_in_decoder_q_k_v(_UpperCamelCase , _UpperCamelCase ) # update to torch tensors for key, value in state_dict.items(): UpperCAmelCase_ = torch.from_numpy(_UpperCamelCase ) # load 🤗 model UpperCAmelCase_ = MaskFormerForInstanceSegmentation(_UpperCamelCase ) model.eval() for name, param in model.named_parameters(): print(_UpperCamelCase , param.shape ) UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(_UpperCamelCase ) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results UpperCAmelCase_ = prepare_img() if "vistas" in model_name: UpperCAmelCase_ = 65 elif "cityscapes" in model_name: UpperCAmelCase_ = 6_5535 else: UpperCAmelCase_ = 255 UpperCAmelCase_ = True if '''ade''' in model_name else False UpperCAmelCase_ = MaskFormerImageProcessor(ignore_index=_UpperCamelCase , reduce_labels=_UpperCamelCase ) UpperCAmelCase_ = image_processor(_UpperCamelCase , return_tensors='''pt''' ) UpperCAmelCase_ = model(**_UpperCamelCase ) print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": UpperCAmelCase_ = torch.tensor( [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) image_processor.save_pretrained(_UpperCamelCase ) if push_to_hub: print('''Pushing model and image processor to the hub...''' ) model.push_to_hub(F"""nielsr/{model_name}""" ) image_processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": lowercase__ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="maskformer-swin-tiny-ade", type=str, help=("Name of the MaskFormer model you'd like to convert",), ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl", type=str, help="Path to the original state dict (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase__ : Any = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
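# NOTE: illustrative sketch, not part of the original conversion script. It shows
# the fused-QKV splitting pattern used by the read_in_* helpers above on a toy
# tensor; the sizes here are made up for demonstration.
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)  # fused [query; key; value] projection
in_proj_bias = torch.randn(3 * dim)

# slice the fused matrix into the three projections, in query/key/value order
query_w = in_proj_weight[:dim, :]
key_w = in_proj_weight[dim : dim * 2, :]
value_w = in_proj_weight[-dim:, :]

# concatenating them back must reproduce the fused weight exactly
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)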
721
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowerCamelCase ( lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = MvpTokenizer lowerCAmelCase__ = MvpTokenizerFast lowerCAmelCase__ = True lowerCAmelCase__ = filter_roberta_detectors def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: super().setUp() UpperCAmelCase_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) ) UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase_ = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase__ ) ) def lowerCAmelCase__ ( self : Tuple , **UpperCAmelCase__ : List[str] ) ->Dict: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : int ) ->Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]: return "lower newer", "lower newer" @cached_property def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]: return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' ) @cached_property def lowerCAmelCase__ ( self : Tuple ) ->Tuple: return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' ) @require_torch def lowerCAmelCase__ ( self : Any ) ->Dict: UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) UpperCAmelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # Test that special tokens are reset @require_torch def lowerCAmelCase__ ( self : str ) ->int: UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' ) # check if 
input_ids are returned and no labels self.assertIn('''input_ids''' , UpperCAmelCase__ ) self.assertIn('''attention_mask''' , UpperCAmelCase__ ) self.assertNotIn('''labels''' , UpperCAmelCase__ ) self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ ) @require_torch def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]: UpperCAmelCase_ = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def lowerCAmelCase__ ( self : List[str] ) ->int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def lowerCAmelCase__ ( self : Dict ) ->Optional[int]: UpperCAmelCase_ = ['''A long paragraph for summarization.'''] UpperCAmelCase_ = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , text_target=UpperCAmelCase__ , return_tensors='''pt''' ) UpperCAmelCase_ = inputs['''input_ids'''] UpperCAmelCase_ = inputs['''labels'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def lowerCAmelCase__ ( self : str ) ->Optional[Any]: pass def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.''' UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', 
'''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
43
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
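# NOTE: illustrative usage note, not part of the original file. With the lazy
# import structure above, importing the package stays cheap; heavy submodules
# are imported only on first attribute access, e.g.:
#
#     from transformers import DeiTConfig  # pulls in configuration_deit only
#     from transformers import DeiTModel   # now triggers the torch-backed modeling_deit import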
700
'''simple docstring'''


def join(separator: str, separated: list[str]):
    '''simple docstring'''
    # mimics str.join: concatenate the items with the separator, then strip the
    # trailing separator appended by the loop
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('''join() accepts only strings to be joined''')
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
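# NOTE: illustrative usage sketch, not part of the original file.
assert join("-", ["a", "b", "c"]) == "a-b-c"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"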
43
0
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase__ : str = get_tests_dir("fixtures/dummy_feature_extractor_config.json") lowercase__ : List[str] = get_tests_dir("fixtures/vocab.json") lowercase__ : int = get_tests_dir("fixtures") class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def lowerCAmelCase__ ( self : Dict ) ->Any: UpperCAmelCase_ = 0 def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]: UpperCAmelCase_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[Any] ) ->str: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaConfig() UpperCAmelCase_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(UpperCAmelCase__ ) processor.save_pretrained(UpperCAmelCase__ ) UpperCAmelCase_ = AutoProcessor.from_pretrained(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Union[str, Any] ) ->int: with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , '''vocab.json''' ) ) UpperCAmelCase_ = AutoProcessor.from_pretrained(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Dict ) ->Optional[int]: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaFeatureExtractor() UpperCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) UpperCAmelCase_ = WavaVecaProcessor(UpperCAmelCase__ , UpperCAmelCase__ ) # save in new folder processor.save_pretrained(UpperCAmelCase__ ) # drop `processor_class` in tokenizer with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , '''r''' ) as f: UpperCAmelCase_ = json.load(UpperCAmelCase__ ) config_dict.pop('''processor_class''' ) with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , '''w''' ) as f: f.write(json.dumps(UpperCAmelCase__ ) ) UpperCAmelCase_ = AutoProcessor.from_pretrained(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Any ) ->Optional[Any]: with 
tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaFeatureExtractor() UpperCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) UpperCAmelCase_ = WavaVecaProcessor(UpperCAmelCase__ , UpperCAmelCase__ ) # save in new folder processor.save_pretrained(UpperCAmelCase__ ) # drop `processor_class` in feature extractor with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , '''r''' ) as f: UpperCAmelCase_ = json.load(UpperCAmelCase__ ) config_dict.pop('''processor_class''' ) with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , '''w''' ) as f: f.write(json.dumps(UpperCAmelCase__ ) ) UpperCAmelCase_ = AutoProcessor.from_pretrained(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->Dict: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(UpperCAmelCase__ ) # copy relevant files copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , '''w''' ) as f: f.write('''{}''' ) UpperCAmelCase_ = AutoProcessor.from_pretrained(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCAmelCase__ ): UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCAmelCase__ ): UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ ) UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) UpperCAmelCase_ = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) UpperCAmelCase_ = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ , use_fast=UpperCAmelCase__ ) UpperCAmelCase_ = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]: try: AutoConfig.register('''custom''' , UpperCAmelCase__ ) AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__ ) AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__ ) AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase__ ): AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__ ) # Now that the config is registered, it can be used 
as any other config with the auto-API UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ = os.path.join(UpperCAmelCase__ , '''vocab.txt''' ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) UpperCAmelCase_ = CustomTokenizer(UpperCAmelCase__ ) UpperCAmelCase_ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(UpperCAmelCase__ ) UpperCAmelCase_ = AutoProcessor.from_pretrained(UpperCAmelCase__ ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self : Any ) ->List[str]: class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = False class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = False class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''AutoFeatureExtractor''' lowerCAmelCase__ = '''AutoTokenizer''' lowerCAmelCase__ = False try: AutoConfig.register('''custom''' , UpperCAmelCase__ ) AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__ ) AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__ ) AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__ ) # If remote code is not set, the default is to use local classes. UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
UpperCAmelCase_ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]: UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]: UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def lowerCAmelCase__ ( cls : Dict ) ->str: UpperCAmelCase_ = TOKEN HfFolder.save_token(UpperCAmelCase__ ) @classmethod def lowerCAmelCase__ ( cls : Dict ) ->int: try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def lowerCAmelCase__ ( self : Any ) ->str: UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(UpperCAmelCase__ , '''test-processor''' ) , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token ) UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCAmelCase__ ( self : str ) ->Any: UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(UpperCAmelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token , organization='''valid_org''' , ) UpperCAmelCase_ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCAmelCase__ ( self : List[Any] ) ->Tuple: CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase__ ) with tempfile.TemporaryDirectory() 
as tmp_dir: UpperCAmelCase_ = os.path.join(UpperCAmelCase__ , '''vocab.txt''' ) with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) UpperCAmelCase_ = CustomTokenizer(UpperCAmelCase__ ) UpperCAmelCase_ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token ) UpperCAmelCase_ = Repository(UpperCAmelCase__ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token ) processor.save_pretrained(UpperCAmelCase__ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(UpperCAmelCase__ , '''tokenizer_config.json''' ) ) as f: UpperCAmelCase_ = json.load(UpperCAmelCase__ ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_processing.py''' ) ) ) repo.push_to_hub() UpperCAmelCase_ = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=UpperCAmelCase__ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
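# NOTE: illustrative sketch, not part of the original test file. The registration
# pattern exercised by the tests above boils down to:
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     AutoProcessor.register(CustomConfig, CustomProcessor)
#
# after which AutoProcessor.from_pretrained(...) resolves any checkpoint whose
# config deserializes to CustomConfig; the tests then undo the registration by
# deleting the corresponding _extra_content entries.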
701
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase__ : Optional[int] = logging.get_logger(__name__) def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, Iterable[int]] , _UpperCamelCase : bool , _UpperCamelCase : int ): '''simple docstring''' def constraint_to_multiple_of(_UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : str=None ): UpperCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: UpperCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: UpperCAmelCase_ = math.ceil(val / multiple ) * multiple return x UpperCAmelCase_ = (output_size, output_size) if isinstance(_UpperCamelCase , _UpperCamelCase ) else output_size UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(_UpperCamelCase ) UpperCAmelCase_ , UpperCAmelCase_ = output_size # determine new height and width UpperCAmelCase_ = output_height / input_height UpperCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width UpperCAmelCase_ = scale_width else: # fit height UpperCAmelCase_ = scale_height UpperCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=_UpperCamelCase ) UpperCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=_UpperCamelCase ) return (new_height, new_width) class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = ['''pixel_values'''] def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : str , ) ->None: super().__init__(**UpperCAmelCase__ ) UpperCAmelCase_ = size if size is not None else {'''height''': 384, '''width''': 384} UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) UpperCAmelCase_ = do_resize UpperCAmelCase_ = size UpperCAmelCase_ = keep_aspect_ratio UpperCAmelCase_ = ensure_multiple_of UpperCAmelCase_ = resample UpperCAmelCase_ = do_rescale UpperCAmelCase_ = rescale_factor UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, 
ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray: UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) UpperCAmelCase_ = get_resize_output_image_size( UpperCAmelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , ) return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Any: return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray: return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ ) def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->PIL.Image.Image: UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ = size if size is not None else self.size UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ ) UpperCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio UpperCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of UpperCAmelCase_ = resample if resample is not None else self.resample UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ = image_std if image_std is not None else self.image_std UpperCAmelCase_ = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ = [to_numpy_array(UpperCAmelCase__ ) for image in images] if do_resize: UpperCAmelCase_ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images] if do_rescale: UpperCAmelCase_ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images] if do_normalize: UpperCAmelCase_ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images] UpperCAmelCase_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images] UpperCAmelCase_ = {'''pixel_values''': images} return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ ) def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) ->Optional[Any]: UpperCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(UpperCAmelCase__ ): UpperCAmelCase_ = target_sizes.numpy() UpperCAmelCase_ = [] for idx in range(len(UpperCAmelCase__ ) ): UpperCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__ ) UpperCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase__ ) else: UpperCAmelCase_ = logits.argmax(dim=1 ) UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
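# NOTE: illustrative worked example, not part of the original file, tracing the
# keep_aspect_ratio + ensure_multiple_of resize logic above. For an input image
# of height=480, width=640 with output_size=(384, 384), keep_aspect_ratio=True
# and multiple=32:
#   scale_height = 384 / 480 = 0.8,  scale_width = 384 / 640 = 0.6
#   |1 - 0.6| = 0.4 is not < |1 - 0.8| = 0.2, so we fit height: both scales = 0.8
#   new_height = constraint_to_multiple_of(0.8 * 480) = 384
#   new_width  = constraint_to_multiple_of(0.8 * 640) = 512
# so the resize target is (384, 512), scaling as little as possible while
# keeping both sides a multiple of 32.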
43
0
'''simple docstring'''


def is_power_of_two(number: int):
    '''simple docstring'''
    if number < 0:
        raise ValueError('''number must not be negative''')
    # a power of two has exactly one set bit; n & (n - 1) clears the lowest set bit
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
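# NOTE: illustrative check, not part of the original file. The bit trick works
# because a power of two has exactly one set bit, so n & (n - 1) clears it:
#   8 -> 0b1000, 7 -> 0b0111, 8 & 7 == 0  -> power of two
#   6 -> 0b0110, 5 -> 0b0101, 6 & 5 == 4  -> not a power of two
# Edge case: 0 & -1 == 0, so this implementation also reports 0 as a power of two.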
702
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : int = logging.get_logger(__name__) lowercase__ : List[str] = { "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' lowerCAmelCase__ = '''time_series_transformer''' lowerCAmelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]: # time series specific configuration UpperCAmelCase_ = prediction_length UpperCAmelCase_ = context_length or prediction_length UpperCAmelCase_ = distribution_output UpperCAmelCase_ = loss UpperCAmelCase_ = input_size UpperCAmelCase_ = num_time_features UpperCAmelCase_ = lags_sequence UpperCAmelCase_ = scaling UpperCAmelCase_ = num_dynamic_real_features UpperCAmelCase_ = num_static_real_features UpperCAmelCase_ = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) UpperCAmelCase_ = cardinality else: UpperCAmelCase_ = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCAmelCase__ ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) UpperCAmelCase_ = embedding_dimension else: UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] UpperCAmelCase_ = num_parallel_samples # Transformer architecture configuration UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features UpperCAmelCase_ = d_model UpperCAmelCase_ = encoder_attention_heads UpperCAmelCase_ = decoder_attention_heads UpperCAmelCase_ = encoder_ffn_dim UpperCAmelCase_ = decoder_ffn_dim UpperCAmelCase_ = encoder_layers UpperCAmelCase_ = decoder_layers UpperCAmelCase_ = dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = encoder_layerdrop 
UpperCAmelCase_ = decoder_layerdrop UpperCAmelCase_ = activation_function UpperCAmelCase_ = init_std UpperCAmelCase_ = use_cache super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ ) @property def lowerCAmelCase__ ( self : List[str] ) ->int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
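# NOTE: illustrative worked example, not part of the original file, assuming the
# default arguments above (input_size=1, lags_sequence=[1..7], zero time/static/
# dynamic features, so cardinality resolves to [0] and embedding_dimension to [0]):
#   _number_of_features = sum([0]) + 0 + 0 + 0 + 1 * 2 = 2
#   feature_size        = input_size * len(lags_sequence) + 2 = 7 + 2 = 9
# i.e. the encoder sees 9 input features per time step before the d_model projection.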
43
0
'''simple docstring'''

from __future__ import annotations


def pigeon_sort(array: list[int]):
    '''simple docstring'''
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
703
'''simple docstring'''

from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig


lowercase__ : Dict = logging.get_logger(__name__)

lowercase__ : List[Any] = "T5Config"


class TFMTaModel(TFTaModel):
    '''simple docstring'''

    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    '''simple docstring'''

    model_type = '''mt5'''
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    '''simple docstring'''

    model_type = '''mt5'''
    config_class = MTaConfig
43
0
'''simple docstring'''

from math import ceil


def solution(n: int = 1001):
    '''simple docstring'''
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
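# NOTE: illustrative check, not part of the original file. For a 5x5 number
# spiral the diagonals are 1, 3, 5, 7, 9, 13, 17, 21, 25, summing to 101, and
# the closed form above agrees:
#   solution(5) = 1 + (4 * 3**2 - 6 * 2) + (4 * 5**2 - 6 * 4) = 1 + 24 + 76 = 101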
704
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowercase__ : str = datasets.logging.get_logger(__name__) lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase__ ( self : List[Any] ) ->Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence''' ), '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Union[str, Any] ) ->Any: if self.config_name == "default": UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) ) else: UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=False ) ->Optional[Any]: if gpus is None: UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0 UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references} UpperCAmelCase_ = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for t in zip(*data.values() )] UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(UpperCAmelCase__ , gpus=UpperCAmelCase__ , progress_bar=UpperCAmelCase__ ) return {"mean_score": mean_score, "scores": scores}
43
0
'''simple docstring'''

import math


def perfect_square(num: int):
    '''simple docstring'''
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int):
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
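# NOTE: illustrative trace, not part of the original file. For n=16 the binary
# search probes mid=8 (64 > 16), mid=3 (9 < 16), mid=5 (25 > 16), then mid=4
# (16 == 16) and returns True; for n=15 the window empties and it returns False.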
705
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073],
        image_std: Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width of a processed image: scale the
        shorter side to `shortest_edge`, cap the longer side, then snap both
        sides down to a multiple of `size_divisor`.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
43
0
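# ---------------------------------------------------------------------------
# Illustration (not part of the dataset row above): the trickiest logic in the
# BridgeTower tester is the expected-size arithmetic -- scale the shorter side
# to `shortest_edge`, cap the longer side at int(1333 / 800 * shortest_edge),
# then snap both sides down to a multiple of `size_divisor`. A minimal
# standalone sketch; the helper name and sample sizes here are illustrative.

def expected_resized_size(height, width, shortest_edge=288, size_divisor=32):
    # scale so the shorter side matches `shortest_edge`
    scale = shortest_edge / min(height, width)
    if height < width:
        new_h, new_w = shortest_edge, scale * width
    else:
        new_h, new_w = scale * height, shortest_edge

    # cap the longer side, mirroring the 1333/800 ratio used by the tester
    max_size = int((1333 / 800) * shortest_edge)
    if max(new_h, new_w) > max_size:
        rescale = max_size / max(new_h, new_w)
        new_h, new_w = new_h * rescale, new_w * rescale

    # round to the nearest integer, then snap down to a multiple of size_divisor
    new_h, new_w = int(new_h + 0.5), int(new_w + 0.5)
    return new_h // size_divisor * size_divisor, new_w // size_divisor * size_divisor


print(expected_resized_size(400, 300))  # -> (384, 288), both multiples of 32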
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker run by each process: repeatedly compare-exchanges with a neighbor."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
706
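# ---------------------------------------------------------------------------
# Illustration (not part of the dataset row above): the multiprocessing sample
# implements odd-even transposition sort with one process per element. For
# contrast, here is the same algorithm sequentially -- the identical
# alternating compare-exchange phases, with no pipes or locks. The function
# name is my own, not from the file above.

def odd_even_transposition_sequential(arr):
    n = len(arr)
    # n phases are enough to guarantee the list is sorted
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))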
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top-k/top-p filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.222_0991,  # 3rd highest value; idx. 0
                    -0.562_0044,
                    5.2322_9752,
                    4.038_6393,
                    -6.879_8378,
                    -0.5478_5802,
                    -3.201_2153,
                    2.9277_7176,
                    1.8817_1953,
                    7.3534_1276,  # 5th highest value; idx. 9
                    8.4320_7833,  # 2nd highest value; idx. 10
                    -9.8571_1836,
                    -5.9620_9236,
                    -1.1303_9161,
                    -7.111_5294,
                    -0.836_9633,
                    -5.318_6408,
                    7.0642_7407,
                    0.8136_9344,
                    -0.8202_3817,
                    -5.917_9796,
                    0.5881_3443,
                    -6.9977_8438,
                    4.7155_1189,
                    -0.1877_1637,
                    7.4402_0759,  # 4th highest value; idx. 25
                    9.3845_0987,  # 1st highest value; idx. 26
                    2.1266_2941,
                    -9.3256_2038,
                    2.3565_2522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.5842_5518,
                    4.5313_9238,
                    -5.5751_0464,
                    -6.2803_0699,
                    -7.1952_9503,
                    -4.0212_2551,
                    1.3933_7037,
                    -6.0670_7057,
                    1.5948_0517,
                    -9.64_3119,
                    0.0390_7799,
                    0.6723_1762,
                    -8.8820_6726,
                    6.2711_5922,  # 4th highest value; idx. 13
                    2.2852_0723,
                    4.8276_7506,
                    4.3042_1368,
                    8.827_5313,  # 2nd highest value; idx. 17
                    5.4402_9958,  # 5th highest value; idx. 18
                    -4.473_5794,
                    7.3857_9536,  # 3rd highest value; idx. 20
                    -2.9105_1663,
                    2.6194_6077,
                    -2.567_4762,
                    -9.4895_9302,
                    -4.0292_2645,
                    -1.3541_6918,
                    9.6770_2323,  # 1st highest value; idx. 27
                    -5.8947_8553,
                    1.8537_0467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)


@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call signature additionally accepts "foo"; generate()
        # must filter it out before passing kwargs to the encoder.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
43
0
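# ---------------------------------------------------------------------------
# Illustration (not part of the dataset rows above): a rough numpy sketch of
# the filtering semantics exercised by test_top_k_top_p_filtering. This is NOT
# the transformers implementation -- just the same idea: keep the top-k
# logits, then keep the smallest prefix of the probability-sorted tokens whose
# cumulative mass reaches top_p, never dropping below min_tokens_to_keep.

import numpy as np


def top_k_top_p_filter(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4, filter_value=-np.inf):
    logits = np.asarray(logits, dtype=np.float64).copy()
    if top_k > 0:
        # keep the k largest logits (ties included), but at least min_tokens_to_keep
        k = min(max(top_k, min_tokens_to_keep), logits.shape[-1])
        kth_largest = np.sort(logits)[-k]
        logits[logits < kth_largest] = filter_value
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]  # token indices by descending logit
        sorted_logits = logits[order]
        probs = np.exp(sorted_logits - sorted_logits[0])
        cumulative = np.cumsum(probs / probs.sum())
        remove = cumulative > top_p
        remove[1:] = remove[:-1].copy()  # keep the token that first crosses top_p
        remove[0] = False
        remove[:min_tokens_to_keep] = False  # enforce the floor
        logits[order[remove]] = filter_value
    return logits


# The four largest logits survive (min_tokens_to_keep=4); the rest become -inf.
print(top_k_top_p_filter([2.0, 1.5, 0.2, -0.3, -1.0, -5.0]))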