code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): a__ : Any ='''pt''' elif is_tf_available(): a__ : Tuple ='''tf''' else: a__ : List[str] ='''jax''' class snake_case ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =PerceiverTokenizer SCREAMING_SNAKE_CASE_ : Optional[int] =False def _lowerCamelCase ( self : int ): super().setUp() __UpperCamelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _lowerCamelCase ( self : Optional[Any] ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def _lowerCamelCase ( self : str , **__A : Tuple ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A ) def _lowerCamelCase ( self : List[str] , __A : Union[str, Any] , __A : Dict=False , __A : Any=2_0 , __A : Tuple=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. 
__UpperCamelCase = [] for i in range(len(__A ) ): try: __UpperCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=__A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __UpperCamelCase = list(filter(lambda __A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , __A ) ) __UpperCamelCase = list(filter(lambda __A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__A ) , __A ) ) if max_length is not None and len(__A ) > max_length: __UpperCamelCase = toks[:max_length] if min_length is not None and len(__A ) < min_length and len(__A ) > 0: while len(__A ) < min_length: __UpperCamelCase = toks + toks # toks_str = [t[1] for t in toks] __UpperCamelCase = [t[0] for t in toks] # Ensure consistency __UpperCamelCase = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) if " " not in output_txt and len(__A ) > 1: __UpperCamelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__A ) ) if with_prefix_space: __UpperCamelCase = ' ' + output_txt __UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A ) return output_txt, output_ids def _lowerCamelCase ( self : Union[str, Any] ): __UpperCamelCase = self.perceiver_tokenizer __UpperCamelCase = 'Unicode €.' 
__UpperCamelCase = tokenizer(__A ) __UpperCamelCase = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['input_ids'] , __A ) # decoding __UpperCamelCase = tokenizer.decode(__A ) self.assertEqual(__A , '[CLS]Unicode €.[SEP]' ) __UpperCamelCase = tokenizer('e è é ê ë' ) __UpperCamelCase = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['input_ids'] , __A ) # decoding __UpperCamelCase = tokenizer.decode(__A ) self.assertEqual(__A , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = self.perceiver_tokenizer __UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __UpperCamelCase = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on __UpperCamelCase = tokenizer(__A , padding=__A , return_tensors=__A ) self.assertIsInstance(__A , __A ) if FRAMEWORK != "jax": __UpperCamelCase = list(batch.input_ids.numpy()[0] ) else: __UpperCamelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__A , __A ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.perceiver_tokenizer __UpperCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __UpperCamelCase = tokenizer(__A , padding=__A , return_tensors=__A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , __A ) self.assertIn('attention_mask' , __A ) 
self.assertNotIn('decoder_input_ids' , __A ) self.assertNotIn('decoder_attention_mask' , __A ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = self.perceiver_tokenizer __UpperCamelCase = [ 'Summary of the text.', 'Another summary.', ] __UpperCamelCase = tokenizer( text_target=__A , max_length=3_2 , padding='max_length' , truncation=__A , return_tensors=__A ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def _lowerCamelCase ( self : Dict ): # safety check on max_len default value so we are sure the test works __UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test __UpperCamelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = ' He is very happy, UNwant\u00E9d,running' __UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A ) tokenizer.save_pretrained(__A ) __UpperCamelCase = tokenizer.__class__.from_pretrained(__A ) __UpperCamelCase = after_tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) shutil.rmtree(__A ) __UpperCamelCase = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __UpperCamelCase = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __UpperCamelCase = tokenizer.encode(__A , add_special_tokens=__A ) 
tokenizer.save_pretrained(__A ) __UpperCamelCase = tokenizer.__class__.from_pretrained(__A ) __UpperCamelCase = after_tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) __UpperCamelCase = tokenizer.__class__.from_pretrained(__A , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(__A ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__A ) with open(os.path.join(__A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __UpperCamelCase = json.load(__A ) with open(os.path.join(__A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __UpperCamelCase = json.load(__A ) __UpperCamelCase = [f'''<extra_id_{i}>''' for i in range(1_2_5 )] __UpperCamelCase = added_tokens_extra_ids + [ 'an_additional_special_token' ] __UpperCamelCase = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(__A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(__A , __A ) with open(os.path.join(__A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(__A , __A ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __UpperCamelCase = tokenizer_class.from_pretrained( __A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __UpperCamelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__A )] __UpperCamelCase = tokenizer_class.from_pretrained( __A , additional_special_tokens=__A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '�' ) def _lowerCamelCase ( self : Dict ): pass def _lowerCamelCase ( self : List[str] ): pass def _lowerCamelCase ( self : Tuple ): pass def _lowerCamelCase ( self : Optional[Any] ): pass def _lowerCamelCase ( self : str ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens __UpperCamelCase = self.get_tokenizers(fast=__A , do_lower_case=__A ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCamelCase = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] __UpperCamelCase = tokenizer.convert_tokens_to_string(__A ) self.assertIsInstance(__A , __A )
53
'''simple docstring''' import os import numpy import onnx def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> Dict: """simple docstring""" __UpperCamelCase = a.name __UpperCamelCase = b.name __UpperCamelCase = '' __UpperCamelCase = '' __UpperCamelCase = a == b __UpperCamelCase = name_a __UpperCamelCase = name_b return res def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : List[Any] ) -> Optional[int]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(__lowercase , __lowercase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) _graph_replace_input_with(node_proto.attribute[1].g , __lowercase , __lowercase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) def lowercase__ ( __lowercase : int , __lowercase : List[Any] , __lowercase : Dict ) -> int: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(__lowercase , __lowercase , __lowercase ) def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : str ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __UpperCamelCase = inits[i].name __UpperCamelCase = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = os.path.dirname(__lowercase ) __UpperCamelCase = os.path.basename(__lowercase ) __UpperCamelCase = 
onnx.load(os.path.join(__lowercase , __lowercase ) ) __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = set() __UpperCamelCase = {} __UpperCamelCase = [] __UpperCamelCase = 0 for i in range(len(__lowercase ) ): if i in dup_set: continue for j in range(i + 1 , len(__lowercase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(__lowercase ) dup_set.add(__lowercase ) __UpperCamelCase = inits[j].data_type __UpperCamelCase = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , __lowercase ) total_reduced_size += mem_size __UpperCamelCase = inits[i].name __UpperCamelCase = inits[j].name if name_i in dup_map: dup_map[name_i].append(__lowercase ) else: __UpperCamelCase = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) __UpperCamelCase = sorted(__lowercase ) _remove_dup_initializers_from_model(__lowercase , __lowercase , __lowercase ) __UpperCamelCase = 'optimized_' + model_file_name __UpperCamelCase = os.path.join(__lowercase , __lowercase ) onnx.save(__lowercase , __lowercase ) return new_model
53
1
'''simple docstring''' def lowercase__ ( __lowercase : float , __lowercase : int ) -> float: """simple docstring""" if digit_amount > 0: return round(number - int(__lowercase ) , __lowercase ) return number - int(__lowercase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
53
'''simple docstring''' import random def lowercase__ ( __lowercase : list , __lowercase : Optional[Any] ) -> tuple: """simple docstring""" __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = [], [], [] for element in data: if element < pivot: less.append(__lowercase ) elif element > pivot: greater.append(__lowercase ) else: equal.append(__lowercase ) return less, equal, greater def lowercase__ ( __lowercase : list , __lowercase : int ) -> Dict: """simple docstring""" if index >= len(__lowercase ) or index < 0: return None __UpperCamelCase = items[random.randint(0 , len(__lowercase ) - 1 )] __UpperCamelCase = 0 __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = _partition(__lowercase , __lowercase ) __UpperCamelCase = len(__lowercase ) __UpperCamelCase = len(__lowercase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__lowercase , __lowercase ) # must be in larger else: return quick_select(__lowercase , index - (m + count) )
53
1
'''simple docstring''' import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a__ : int =logging.get_logger(__name__) @dataclass class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : str =field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) SCREAMING_SNAKE_CASE_ : str =field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) SCREAMING_SNAKE_CASE_ : int =field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ : bool =field( default=__lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = self.task_name.lower() class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] ="train" SCREAMING_SNAKE_CASE_ : Dict ="dev" SCREAMING_SNAKE_CASE_ : Dict ="test" class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : GlueDataTrainingArguments SCREAMING_SNAKE_CASE_ : str SCREAMING_SNAKE_CASE_ : List[InputFeatures] def __init__( self : Optional[Any] , __A : GlueDataTrainingArguments , __A : PreTrainedTokenizerBase , __A : Optional[int] = None , __A : Union[str, Split] = Split.train , __A : Optional[str] = None , ): warnings.warn( 'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ' 'library. 
You can have a look at this example script for pointers: ' 'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , __A , ) __UpperCamelCase = args __UpperCamelCase = glue_processors[args.task_name]() __UpperCamelCase = glue_output_modes[args.task_name] if isinstance(__A , __A ): try: __UpperCamelCase = Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) # Load data features from cache or dataset file __UpperCamelCase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , ) __UpperCamelCase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __UpperCamelCase , __UpperCamelCase = label_list[2], label_list[1] __UpperCamelCase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__UpperCamelCase = cached_features_file + '.lock' with FileLock(__A ): if os.path.exists(__A ) and not args.overwrite_cache: __UpperCamelCase = time.time() __UpperCamelCase = torch.load(__A ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) else: logger.info(f'''Creating features from dataset file at {args.data_dir}''' ) if mode == Split.dev: __UpperCamelCase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __UpperCamelCase = self.processor.get_test_examples(args.data_dir ) else: __UpperCamelCase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __UpperCamelCase = examples[:limit_length] __UpperCamelCase = glue_convert_examples_to_features( __A , __A , max_length=args.max_seq_length , label_list=__A , output_mode=self.output_mode , ) __UpperCamelCase = time.time() torch.save(self.features , __A ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self : Any ): return len(self.features ) def __getitem__( self : Dict , __A : Union[str, Any] ): return self.features[i] def _lowerCamelCase ( self : Optional[int] ): return self.label_list
53
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowercase__ ( __lowercase : Any ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def lowercase__ ( __lowercase : Tuple ) -> int: """simple docstring""" __UpperCamelCase , __UpperCamelCase = emb.weight.shape __UpperCamelCase = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) __UpperCamelCase = emb.weight.data return lin_layer def lowercase__ ( __lowercase : int , __lowercase : List[str]="facebook/mbart-large-en-ro" , __lowercase : str=False , __lowercase : List[Any]=False ) -> int: """simple docstring""" __UpperCamelCase = torch.load(__lowercase , map_location='cpu' )['model'] remove_ignore_keys_(__lowercase ) __UpperCamelCase = state_dict['encoder.embed_tokens.weight'].shape[0] __UpperCamelCase = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase ) if mbart_aa and finetuned: __UpperCamelCase = 'relu' __UpperCamelCase = state_dict['decoder.embed_tokens.weight'] __UpperCamelCase = MBartForConditionalGeneration(__lowercase ) model.model.load_state_dict(__lowercase ) if finetuned: __UpperCamelCase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a__ : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', 
action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') a__ : Union[str, Any] =parser.parse_args() a__ : str =convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
53
1
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() a__ : int =logging.get_logger(__name__) def lowercase__ ( __lowercase : Tuple , __lowercase : Any ) -> List[Any]: """simple docstring""" __UpperCamelCase = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ 
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'), ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'), ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'), ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'), ('encoder.deit.norm.weight', 'encoder.layernorm.weight'), ('encoder.deit.norm.bias', 'encoder.layernorm.bias'), ] ) return rename_keys def lowercase__ ( __lowercase : Optional[Any] , __lowercase : int ) -> Tuple: """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) __UpperCamelCase = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) __UpperCamelCase = in_proj_weight[ : encoder_config.hidden_size, : ] __UpperCamelCase = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] __UpperCamelCase = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowercase__ ( __lowercase : List[str] , __lowercase : str , __lowercase : int ) -> List[Any]: """simple docstring""" __UpperCamelCase = dct.pop(__lowercase ) __UpperCamelCase = val def lowercase__ ( __lowercase : Tuple ) -> Any: """simple docstring""" if "handwritten" in checkpoint_url: __UpperCamelCase = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: __UpperCamelCase = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg' __UpperCamelCase = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('RGB' ) return im 
# NOTE(review): this chunk has been machine-obfuscated — every assignment target
# was renamed to `__UpperCamelCase` while right-hand sides still read the original
# descriptive names (`checkpoint_url`, `model`, `state_dict`, ...). As written it
# raises NameError at call time. Code left byte-identical; problems flagged below.
@torch.no_grad()
def lowercase__ ( __lowercase : Dict , __lowercase : str ) -> Union[str, Any]:
    """Convert an original TrOCR fairseq checkpoint (first argument: URL) into a
    HuggingFace VisionEncoderDecoderModel, verify its logits against the released
    checkpoints, and save model + processor to the folder in the second argument.

    NOTE(review): the ``__main__`` block below calls ``convert_tr_ocr_checkpoint``
    but this function is named ``lowercase__`` — the script cannot run as-is.
    """
    # Encoder/decoder configs; sizes are adjusted from the checkpoint URL below.
    __UpperCamelCase = ViTConfig(image_size=384 , qkv_bias=__lowercase )
    __UpperCamelCase = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:  # NOTE(review): `checkpoint_url` is undefined here (param is `__lowercase`)
        __UpperCamelCase = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        __UpperCamelCase = 1024
        __UpperCamelCase = 4096
        __UpperCamelCase = 24
        __UpperCamelCase = 16
        __UpperCamelCase = 1024
    else:
        raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        __UpperCamelCase = False
        __UpperCamelCase = 'relu'
        __UpperCamelCase = 1024
        __UpperCamelCase = True
        __UpperCamelCase = False
        __UpperCamelCase = False

    # load HuggingFace model
    __UpperCamelCase = ViTModel(__lowercase , add_pooling_layer=__lowercase )
    __UpperCamelCase = TrOCRForCausalLM(__lowercase )
    # NOTE(review): `model` below is undefined — the assignments above all went to `__UpperCamelCase`
    __UpperCamelCase = VisionEncoderDecoderModel(encoder=__lowercase , decoder=__lowercase )
    model.eval()

    # load state_dict of original model, rename some keys
    __UpperCamelCase = torch.hub.load_state_dict_from_url(__lowercase , map_location='cpu' , check_hash=__lowercase )['model']

    __UpperCamelCase = create_rename_keys(__lowercase , __lowercase )
    for src, dest in rename_keys:
        rename_key(__lowercase , __lowercase , __lowercase )
    read_in_q_k_v(__lowercase , __lowercase )

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        __UpperCamelCase = state_dict.pop(__lowercase )
        if key.startswith('decoder' ) and "output_projection" not in key:
            __UpperCamelCase = val
        else:
            __UpperCamelCase = val

    # load state dict
    model.load_state_dict(__lowercase )

    # Check outputs on an image
    __UpperCamelCase = ViTImageProcessor(size=encoder_config.image_size )
    __UpperCamelCase = RobertaTokenizer.from_pretrained('roberta-large' )
    __UpperCamelCase = TrOCRProcessor(__lowercase , __lowercase )

    __UpperCamelCase = processor(images=prepare_img(__lowercase ) , return_tensors='pt' ).pixel_values

    # verify logits
    __UpperCamelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    __UpperCamelCase = model(pixel_values=__lowercase , decoder_input_ids=__lowercase )
    __UpperCamelCase = outputs.logits

    # Expected first-10 logits per released checkpoint, used as a conversion sanity check.
    __UpperCamelCase = torch.Size([1, 1, 50265] )
    if "trocr-base-handwritten" in checkpoint_url:
        __UpperCamelCase = torch.tensor(
            [-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
    elif "trocr-large-handwritten" in checkpoint_url:
        __UpperCamelCase = torch.tensor(
            [-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
    elif "trocr-base-printed" in checkpoint_url:
        __UpperCamelCase = torch.tensor(
            [-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
    elif "trocr-large-printed" in checkpoint_url:
        __UpperCamelCase = torch.tensor(
            [-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )

    # stage1 checkpoints are pre-finetuning, so the reference logits do not apply
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , __lowercase , atol=1e-3 ), "First elements of logits not as expected"

    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowercase )
    print(F'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(__lowercase )


if __name__ == "__main__":
    # NOTE(review): the parser/args objects are assigned to `a__` but read back as
    # `parser`/`args` — undefined names at runtime.
    a__ : Any =argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    a__ : str =parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
53
'''simple docstring'''

# NOTE(review): machine-obfuscated chunk — assignment targets were renamed to
# `__UpperCamelCase`/`a__` while reads keep original names (`buffer`, `outputs`,
# `parser`, `args`, ...), and the base class was replaced by the undefined name
# `__lowerCamelCase` (presumably IterableDataset — confirm against the original).
# Code left byte-identical; defects flagged inline.
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class snake_case ( __lowerCamelCase ):
    """Iterable dataset that packs tokenized examples into fixed-length sequences."""

    def __init__( self : Any , __A : Dict , __A : str , __A : List[Any]=1_0_2_4 , __A : Tuple=1_0_2_4 , __A : str=3.6 ):
        # NOTE(review): the right-hand names (`tokenizer`, `dataset`, `seq_length`,
        # `chars_per_token`, `num_of_sequences`) are not bound — params are `__A`.
        __UpperCamelCase = tokenizer
        __UpperCamelCase = tokenizer.bos_token_id
        __UpperCamelCase = dataset
        __UpperCamelCase = seq_length
        # estimated number of characters needed to fill one buffer of sequences
        __UpperCamelCase = seq_length * chars_per_token * num_of_sequences

    def __iter__( self : Any ):
        __UpperCamelCase = iter(self.dataset )
        __UpperCamelCase = True
        while more_examples:
            # accumulate raw text until the character budget is reached
            __UpperCamelCase , __UpperCamelCase = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(__A )['content'] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    __UpperCamelCase = False
                    break
            # tokenize the buffer and re-chunk into exact seq_length windows
            __UpperCamelCase = tokenizer(__A , truncation=__A )['input_ids']
            __UpperCamelCase = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(__A ) , self.seq_length ):
                __UpperCamelCase = all_token_ids[i : i + self.seq_length]
                if len(__A ) == self.seq_length:
                    yield torch.tensor(__A )


def lowercase__ ( __lowercase : Optional[Any] ) -> Union[str, Any]:
    """Build the evaluation DataLoader from the streamed training split."""
    __UpperCamelCase = {'streaming': True}
    __UpperCamelCase = load_dataset(args.dataset_name , split='train' , **__lowercase )
    __UpperCamelCase = ConstantLengthDataset(__lowercase , __lowercase , seq_length=args.seq_length )
    __UpperCamelCase = DataLoader(__lowercase , batch_size=args.batch_size )
    return eval_dataloader


def lowercase__ ( __lowercase : Tuple ) -> Optional[Any]:
    """Compute mean loss and perplexity over the eval dataloader.

    NOTE(review): shadows the previous `lowercase__` definition; the module-level
    code below calls `create_dataloader`/`evaluate`, which are never defined.
    """
    model.eval()
    __UpperCamelCase = []
    for step, batch in enumerate(__lowercase ):
        with torch.no_grad():
            __UpperCamelCase = model(__lowercase , labels=__lowercase )
        __UpperCamelCase = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(__lowercase ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    __UpperCamelCase = torch.mean(torch.cat(__lowercase ) )
    try:
        # perplexity = exp(mean loss); may overflow for very large losses
        __UpperCamelCase = torch.exp(__lowercase )
    except OverflowError:
        __UpperCamelCase = float('inf' )
    return loss.item(), perplexity.item()


# Setup Accelerator
a__ : int =Accelerator()

# Parse configuration
a__ : Dict =HfArgumentParser(EvaluationArguments)
a__ : Union[str, Any] =parser.parse_args()
set_seed(args.seed)

# Logging
a__ : List[Any] =logging.getLogger(__name__)
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)

# Load model and tokenizer
a__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt)
a__ : List[Any] =AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
a__ : Union[str, Any] =create_dataloader(args)

# Prepare everything with our `accelerator`.
a__ , a__ : List[str] =accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
a__ , a__ : Any =evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
53
1
'''simple docstring'''

import unittest

from transformers import DonutProcessor

# Checkpoint used to instantiate the processor under test.
a__ : str ='''naver-clova-ix/donut-base'''


class snake_case ( unittest.TestCase ):
    """Round-trip test: a Donut tag-token sequence must parse back into JSON."""

    def setUp( self : Dict ):
        # Fix 1: both methods were named `_lowerCamelCase`, so the second
        # definition silently overwrote the first and no setUp hook ran.
        # Fix 2: the processor was assigned to a throwaway local and built from
        # the undefined name `__A` instead of the module-level checkpoint name.
        self.processor = DonutProcessor.from_pretrained(a__ )

    def test_tokenajson( self : List[Any] ):
        """token2json must convert the XML-like tag sequence into the expected dict."""
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        # Fix 3: `tokenajson` does not exist on DonutProcessor; the real API is
        # `token2json` (the name was garbled by the same obfuscation pass).
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
53
'''simple docstring'''

# NOTE(review): machine-obfuscated chunk (GPT-Neo configuration + ONNX export
# config). Assignment targets were renamed to `__UpperCamelCase` while reads keep
# the original names (`vocab_size`, `attention_types`, `common_inputs`, ...), and
# the base class was replaced by the undefined `__lowerCamelCase` (presumably
# PretrainedConfig / OnnxConfigWithPast — confirm). Code left byte-identical.
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


a__ : Any =logging.get_logger(__name__)

a__ : Optional[Any] ={
    '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class snake_case ( __lowerCamelCase ):
    """Configuration class for GPT-Neo style models."""

    SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo"
    SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"]
    SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ):
        # NOTE(review): all the right-hand names below (`vocab_size`, ...) are
        # undefined — the parameters are all named `__A`. Also note the mutable
        # default argument for attention_types.
        __UpperCamelCase = vocab_size
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_layers
        __UpperCamelCase = num_heads
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = window_size
        __UpperCamelCase = activation_function
        __UpperCamelCase = resid_dropout
        __UpperCamelCase = embed_dropout
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = classifier_dropout
        __UpperCamelCase = layer_norm_epsilon
        __UpperCamelCase = initializer_range
        __UpperCamelCase = use_cache
        __UpperCamelCase = bos_token_id
        __UpperCamelCase = eos_token_id
        __UpperCamelCase = attention_types
        __UpperCamelCase = self.expand_attention_types_params(__A )

        # one attention kind ("global"/"local") must be declared per layer
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )

        super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )

    @staticmethod
    def _lowerCamelCase ( __A : Tuple ):
        # Expands [[["global", "local"], 12]] into a flat per-layer list.
        # NOTE(review): `attention_types`/`attentions` are undefined here — the
        # parameter is `__A` and the accumulator was assigned to `__UpperCamelCase`.
        __UpperCamelCase = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions


def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any:
    """Custom torch.Tensor.unfold implementation (to enable ONNX export)."""
    import torch

    __UpperCamelCase = input.size()
    __UpperCamelCase = len(__lowercase )
    __UpperCamelCase = shape[dimension]
    __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase )
    __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1
    __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None]
    __UpperCamelCase = [slice(__lowercase )] * rank
    __UpperCamelCase = indices
    __UpperCamelCase = input[s]

    __UpperCamelCase = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )

    return sliced.permute(__lowercase )


def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]:
    """Find the largest divisor-based block length and segment count (ONNX helper)."""
    import torch

    __UpperCamelCase = torch.arange(1 , __lowercase )
    __UpperCamelCase = torch.remainder(__lowercase , __lowercase )
    __UpperCamelCase = remainders == 0
    __UpperCamelCase = candidates[divisor_indices]
    __UpperCamelCase = torch.max(__lowercase )
    return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' )


class snake_case ( __lowerCamelCase ):
    """ONNX export configuration for GPT-Neo (with past-key-values support)."""

    @property
    def _lowerCamelCase ( self : Tuple ):
        # Dynamic-axes mapping for the exported model inputs.
        __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(__A , direction='inputs' )
            __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            __UpperCamelCase = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def _lowerCamelCase ( self : int ):
        return self._config.num_heads

    def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
        __UpperCamelCase = super(__A , self ).generate_dummy_inputs(
            __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )

        # We need to order the input in the way they appears in the forward()
        __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                __UpperCamelCase = seqlen + 2
                __UpperCamelCase = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                __UpperCamelCase = [
                    (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
                ]

        __UpperCamelCase = common_inputs['attention_mask']
        if self.use_past:
            # extend the attention mask to also cover the past sequence
            __UpperCamelCase = ordered_inputs['attention_mask'].dtype
            __UpperCamelCase = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 )

        return ordered_inputs

    @property
    def _lowerCamelCase ( self : Dict ):
        return 1_3
53
1
'''simple docstring'''

# Morse translation table. The '!' entry is not part of the ITU-R M.1677-1
# recommendation but is commonly included; ' ' maps to the word separator '/'.
# Fix: the table was assigned to the obfuscated name `a__` while the functions
# below read `MORSE_CODE_DICT`/`REVERSE_DICT`, and all three functions shared the
# name `lowercase__` (so `encrypt`/`decrypt`/`main` were undefined). Real names
# restored so the module actually runs.
MORSE_CODE_DICT = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
    'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
    'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
    'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
    'Y': '-.--', 'Z': '--..',
    '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
    '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----',
    '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-',
    "'": '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-',
    '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--',
    ' ': '/',
}
# fmt: on

# Inverse table used for decoding.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode *message* (case-insensitive) into space-separated Morse code.

    Raises KeyError for characters not present in MORSE_CODE_DICT.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a space-separated Morse string back into uppercase plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Demo: round-trip a sample message through encrypt and decrypt."""
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
53
'''simple docstring'''

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


# NOTE(review): machine-obfuscated chunk — local assignment targets were renamed
# to `__UpperCamelCase` while reads keep the original names (`task_prompt`,
# `inputs`, `sequence`). Code left byte-identical; defects flagged inline.
class snake_case ( __lowerCamelCase ):
    """Donut-based document question answering tool (encode / forward / decode)."""

    SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa"
    SCREAMING_SNAKE_CASE_ : Dict =(
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    SCREAMING_SNAKE_CASE_ : List[str] ="document_qa"
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel

    SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"]
    SCREAMING_SNAKE_CASE_ : Any =["text"]

    def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ):
        # Pillow is required because inputs are PIL images.
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )

        super().__init__(*__A , **__A )

    def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ):
        # Build the DocVQA task prompt and tokenize it as decoder input.
        # NOTE(review): `task_prompt` is undefined — assigned to `__UpperCamelCase` above.
        __UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        __UpperCamelCase = task_prompt.replace('{user_input}' , __A )
        __UpperCamelCase = self.pre_processor.tokenizer(
            __A , add_special_tokens=__A , return_tensors='pt' ).input_ids
        __UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ):
        # NOTE(review): `inputs` is undefined — the parameter is `__A`.
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences

    def _lowerCamelCase ( self : Tuple , __A : List[Any] ):
        # Decode generated ids, strip special tokens, then parse the tag
        # sequence into JSON and return the answer field.
        # NOTE(review): `sequence` is undefined below, and `tokenajson` looks
        # like a garbled `token2json` — confirm against the processor API.
        __UpperCamelCase = self.pre_processor.batch_decode(__A )[0]
        __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        __UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip()  # remove first task start token
        __UpperCamelCase = self.pre_processor.tokenajson(__A )

        return sequence["answer"]
53
1
'''simple docstring'''

# NOTE(review): machine-obfuscated chunk (diffusers AltDiffusion img2img tests).
# Assignment targets were renamed to `__UpperCamelCase` while reads keep the
# original names (`batch_size`, `unet`, `alt_pipe`, `pipe`, `image`, ...), and
# boolean/string arguments were replaced by the undefined `__A`. Code left
# byte-identical; the pattern is flagged once here rather than on every line.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImgaImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class snake_case ( unittest.TestCase ):
    """Fast (tiny-model) tests for the AltDiffusion image-to-image pipeline."""

    def _lowerCamelCase ( self : List[str] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _lowerCamelCase ( self : Any ):
        # Random dummy input image tensor (batch 1, 3 channels, 32x32).
        __UpperCamelCase = 1
        __UpperCamelCase = 3
        __UpperCamelCase = (3_2, 3_2)

        __UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__A )
        return image

    @property
    def _lowerCamelCase ( self : int ):
        # Tiny conditional UNet for fast tests.
        torch.manual_seed(0 )
        __UpperCamelCase = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
        return model

    @property
    def _lowerCamelCase ( self : int ):
        # Tiny VAE.
        torch.manual_seed(0 )
        __UpperCamelCase = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model

    @property
    def _lowerCamelCase ( self : Any ):
        # Tiny Roberta-series text encoder.
        torch.manual_seed(0 )
        __UpperCamelCase = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
        return RobertaSeriesModelWithTransformation(__A )

    @property
    def _lowerCamelCase ( self : Any ):
        # Dummy feature extractor stand-in returning an empty pixel_values holder.
        def extract(*__A : int , **__A : Dict ):
            class snake_case :
                """simple docstring"""

                def __init__( self : int ):
                    __UpperCamelCase = torch.ones([0] )

                def _lowerCamelCase ( self : int , __A : Union[str, Any] ):
                    self.pixel_values.to(__A )
                    return self

            return Out()

        return extract

    def _lowerCamelCase ( self : Optional[Any] ):
        __UpperCamelCase = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __UpperCamelCase = self.dummy_cond_unet
        __UpperCamelCase = PNDMScheduler(skip_prk_steps=__A )
        __UpperCamelCase = self.dummy_vae
        __UpperCamelCase = self.dummy_text_encoder
        __UpperCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
        __UpperCamelCase = 7_7

        __UpperCamelCase = self.dummy_image.to(__A )
        __UpperCamelCase = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        __UpperCamelCase = AltDiffusionImgaImgPipeline(
            unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
        __UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__A )
        __UpperCamelCase = alt_pipe.to(__A )
        alt_pipe.set_progress_bar_config(disable=__A )

        __UpperCamelCase = 'A painting of a squirrel eating a burger'
        __UpperCamelCase = torch.Generator(device=__A ).manual_seed(0 )
        __UpperCamelCase = alt_pipe(
            [prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__A , )

        __UpperCamelCase = output.images

        # second run with return_dict=False must produce identical images
        __UpperCamelCase = torch.Generator(device=__A ).manual_seed(0 )
        __UpperCamelCase = alt_pipe(
            [prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__A , return_dict=__A , )[0]

        __UpperCamelCase = image[0, -3:, -3:, -1]
        __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 3_2, 3_2, 3)
        __UpperCamelCase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3

    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def _lowerCamelCase ( self : int ):
        # Same pipeline exercised entirely in fp16.
        __UpperCamelCase = self.dummy_cond_unet
        __UpperCamelCase = PNDMScheduler(skip_prk_steps=__A )
        __UpperCamelCase = self.dummy_vae
        __UpperCamelCase = self.dummy_text_encoder
        __UpperCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
        __UpperCamelCase = 7_7

        __UpperCamelCase = self.dummy_image.to(__A )

        # put models in fp16
        __UpperCamelCase = unet.half()
        __UpperCamelCase = vae.half()
        __UpperCamelCase = bert.half()

        # make sure here that pndm scheduler skips prk
        __UpperCamelCase = AltDiffusionImgaImgPipeline(
            unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
        __UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__A )
        __UpperCamelCase = alt_pipe.to(__A )
        alt_pipe.set_progress_bar_config(disable=__A )

        __UpperCamelCase = 'A painting of a squirrel eating a burger'
        __UpperCamelCase = torch.manual_seed(0 )
        __UpperCamelCase = alt_pipe(
            [prompt] , generator=__A , num_inference_steps=2 , output_type='np' , image=__A , ).images

        assert image.shape == (1, 3_2, 3_2, 3)

    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def _lowerCamelCase ( self : Tuple ):
        __UpperCamelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        __UpperCamelCase = init_image.resize((7_6_0, 5_0_4) )

        __UpperCamelCase = 'BAAI/AltDiffusion'
        __UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
            __A , safety_checker=__A , )
        pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        pipe.enable_attention_slicing()

        __UpperCamelCase = 'A fantasy landscape, trending on artstation'
        __UpperCamelCase = torch.manual_seed(0 )
        __UpperCamelCase = pipe(
            prompt=__A , image=__A , strength=0.75 , guidance_scale=7.5 , generator=__A , output_type='np' , )

        __UpperCamelCase = output.images[0]

        __UpperCamelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert image.shape == (5_0_4, 7_6_0, 3)
        __UpperCamelCase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    """Slow integration test against the released BAAI/AltDiffusion checkpoint."""

    def _lowerCamelCase ( self : List[Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase ( self : Tuple ):
        __UpperCamelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        __UpperCamelCase = init_image.resize((7_6_8, 5_1_2) )

        __UpperCamelCase = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )

        __UpperCamelCase = 'BAAI/AltDiffusion'
        __UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
            __A , safety_checker=__A , )
        pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        pipe.enable_attention_slicing()

        __UpperCamelCase = 'A fantasy landscape, trending on artstation'
        __UpperCamelCase = torch.manual_seed(0 )
        __UpperCamelCase = pipe(
            prompt=__A , image=__A , strength=0.75 , guidance_scale=7.5 , generator=__A , output_type='np' , )
        __UpperCamelCase = output.images[0]

        assert image.shape == (5_1_2, 7_6_8, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1e-2
53
'''simple docstring'''

# Package init for the versatile-diffusion pipelines: if torch and a recent
# enough transformers (>= 4.25.0) are available the real implementations are
# exported, otherwise placeholder "dummy" objects that raise a helpful error
# on use are exported instead.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # fall back to dummy objects so `import` of this package never fails
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
1
'''simple docstring'''

# NOTE(review): machine-obfuscated chunk (MobileNetV1 image-processor tests).
# Assignment targets were renamed to `__UpperCamelCase` while reads keep original
# names (`size`, `crop_size`, `image_processor`, `image_inputs`, ...); keyword
# arguments were replaced by the undefined `__A`; the mixin base class became the
# undefined `__lowerCamelCase`; and every method is named `_lowerCamelCase`, so
# later definitions clobber earlier ones and unittest discovers none of them as
# tests. Code left byte-identical.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class snake_case ( unittest.TestCase ):
    """Holds the image-processor constructor kwargs shared by the tests below."""

    def __init__( self : Any , __A : Union[str, Any] , __A : Tuple=7 , __A : Tuple=3 , __A : Optional[int]=1_8 , __A : int=3_0 , __A : Any=4_0_0 , __A : Optional[Any]=True , __A : List[Any]=None , __A : Union[str, Any]=True , __A : Any=None , ):
        # NOTE(review): the right-hand names below are undefined — params are `__A`.
        __UpperCamelCase = size if size is not None else {'shortest_edge': 2_0}
        __UpperCamelCase = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        __UpperCamelCase = parent
        __UpperCamelCase = batch_size
        __UpperCamelCase = num_channels
        __UpperCamelCase = image_size
        __UpperCamelCase = min_resolution
        __UpperCamelCase = max_resolution
        __UpperCamelCase = do_resize
        __UpperCamelCase = size
        __UpperCamelCase = do_center_crop
        __UpperCamelCase = crop_size

    def _lowerCamelCase ( self : Tuple ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class snake_case ( __lowerCamelCase , unittest.TestCase ):
    """Behavioral tests for MobileNetV1ImageProcessor (PIL / numpy / torch inputs)."""

    SCREAMING_SNAKE_CASE_ : str =MobileNetVaImageProcessor if is_vision_available() else None

    def _lowerCamelCase ( self : str ):
        __UpperCamelCase = MobileNetVaImageProcessingTester(self )

    @property
    def _lowerCamelCase ( self : int ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowerCamelCase ( self : List[Any] ):
        # The processor must expose the configured properties.
        __UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__A , 'do_resize' ) )
        self.assertTrue(hasattr(__A , 'size' ) )
        self.assertTrue(hasattr(__A , 'do_center_crop' ) )
        self.assertTrue(hasattr(__A , 'crop_size' ) )

    def _lowerCamelCase ( self : Dict ):
        # from_dict must honor both the stored dict and keyword overrides.
        __UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
        self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )

        __UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
        self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )

    def _lowerCamelCase ( self : List[str] ):
        pass

    def _lowerCamelCase ( self : Union[str, Any] ):
        # Initialize image_processing
        __UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , Image.Image )

        # Test not batched input
        __UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )

        # Test batched
        __UpperCamelCase = image_processing(__A , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )

    def _lowerCamelCase ( self : Union[str, Any] ):
        # Initialize image_processing
        __UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , np.ndarray )

        # Test not batched input
        __UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )

        # Test batched
        __UpperCamelCase = image_processing(__A , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )

    def _lowerCamelCase ( self : Union[str, Any] ):
        # Initialize image_processing
        __UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , torch.Tensor )

        # Test not batched input
        __UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )

        # Test batched
        __UpperCamelCase = image_processing(__A , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )
53
'''Parquet dataset reader/writer (machine-obfuscated copy).'''
# NOTE(review): identifiers were machine-obfuscated -- assignment targets
# became `__UpperCamelCase` while reads kept original names (`batch_size`,
# `dataset`, `written`, ...), parameters became `__A`/`__lowercase`
# (repeating `__A` in one signature is a SyntaxError), and the reader's base
# class became `__lowerCamelCase` (undefined; presumably
# `AbstractDatasetReader`).  Restore the original names before running.
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def lowercase__ ( __lowercase : Features ) -> Optional[int]:
    '''Pick a parquet row-group size for the given features.

    Walks the feature tree and takes the smallest configured row-group size
    for Image / Audio / binary features; returns None when no such feature
    is present (i.e. the accumulator stayed at +inf).
    '''
    __UpperCamelCase = np.inf

    def set_batch_size(__lowercase : FeatureType ) -> None:
        nonlocal batch_size
        # NOTE(review): the isinstance second arguments were obfuscated to
        # `__lowercase`; presumably they were the Image / Audio / Value
        # feature classes -- TODO confirm.
        if isinstance(__lowercase , __lowercase ):
            __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(__lowercase , __lowercase ):
            __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary":
            __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(__lowercase , __lowercase )
    return None if batch_size is np.inf else batch_size


class snake_case ( __lowerCamelCase ):
    '''Dataset reader backed by the packaged `Parquet` builder.'''

    def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ):
        super().__init__(
            __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
        # Normalise path(s) to a {split: paths} mapping, then build the
        # packaged Parquet builder.
        __UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths}
        __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1]
        __UpperCamelCase = Parquet(
            cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , )

    def _lowerCamelCase ( self : Optional[int] ):
        '''Materialise the dataset: streaming or regular (map-style).'''
        # Build iterable dataset
        if self.streaming:
            __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            __UpperCamelCase = None
            __UpperCamelCase = None
            __UpperCamelCase = None
            __UpperCamelCase = None
            self.builder.download_and_prepare(
                download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
            __UpperCamelCase = self.builder.as_dataset(
                split=self.split , verification_mode=__A , in_memory=self.keep_in_memory )
        return dataset


class snake_case :
    '''Writes a `Dataset` to parquet, batching rows into row groups.'''

    def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ):
        __UpperCamelCase = dataset
        __UpperCamelCase = path_or_buf
        # Default the batch size from the dataset's features (row-group
        # sizing for image/audio/binary columns).
        __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features )
        __UpperCamelCase = parquet_writer_kwargs

    def _lowerCamelCase ( self : Optional[int] ):
        '''Write to the configured path or file object; return bytes written.'''
        __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , 'wb+' ) as buffer:
                __UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs )
        else:
            __UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs )
        return written

    def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ):
        '''Stream the dataset's Arrow table to a `pq.ParquetWriter` in batches.'''
        __UpperCamelCase = 0
        __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A )
        __UpperCamelCase = self.dataset.features.arrow_schema

        __UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A )

        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , __A ) ,
            unit='ba' ,
            disable=not logging.is_progress_bar_enabled() ,
            desc='Creating parquet from Arrow format' ,
        ):
            __UpperCamelCase = query_table(
                table=self.dataset._data ,
                key=slice(__A , offset + batch_size ) ,
                indices=self.dataset._indices if self.dataset._indices is not None else None ,
            )
            writer.write_table(__A )
            written += batch.nbytes
        writer.close()
        return written
53
1
'''FSNER few-shot NER model wrapper around a BERT encoder (obfuscated copy).'''
# NOTE(review): machine-obfuscated and not runnable as written: all methods
# share the name `_lowerCamelCase` (later defs shadow earlier ones), one
# signature repeats `__A` (SyntaxError), `super(__A, self)` passes the
# checkpoint string where the class was expected, and `self.BERT` /
# `self.bert` / `self.cos` / `self.softmax` are read but the corresponding
# assignments were renamed to `__UpperCamelCase`.  Restore original names.
import torch

from transformers import AutoModel


class snake_case ( torch.nn.Module ):
    '''Scores start/end token positions of a query against support examples.'''

    def __init__( self : Optional[int] , __A : List[Any]="sayef/fsner-bert-base-uncased" ):
        super(__A , self ).__init__()

        # Encoder plus similarity/normalisation heads (bindings lost to
        # obfuscation -- presumably self.bert / self.cos / self.softmax).
        __UpperCamelCase = AutoModel.from_pretrained(__A , return_dict=__A )
        __UpperCamelCase = torch.nn.CosineSimilarity(3 , 1e-08 )
        __UpperCamelCase = torch.nn.Softmax(dim=1 )

    def _lowerCamelCase ( self : Tuple , **__A : Optional[int] ):
        # Encode tokens; returns the last hidden state only.
        return self.bert(**__A ).last_hidden_state

    def _lowerCamelCase ( self : Tuple , __A : Tuple ):
        # Sum embeddings over the hidden dimension (axis 2).
        return token_embeddings.sum(2 , keepdim=__A )

    def _lowerCamelCase ( self : List[Any] , __A : str , __A : int , __A : str=1 ):
        # Temperature-scaled softmax over cosine similarities.
        return self.softmax(T * self.cos(__A , __A ) )

    def _lowerCamelCase ( self : Optional[int] , __A : str , __A : Any ):
        '''Forward pass: score start/end positions of queries against supports.

        `W_supports` carries per-example sizes plus start/end marker token
        ids, which are popped before encoding.
        '''
        __UpperCamelCase = W_supports['sizes'].tolist()
        __UpperCamelCase = W_supports['start_token_id'].item()
        __UpperCamelCase = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Encode queries and supports with the shared encoder.
        __UpperCamelCase = self.BERT(**__A )
        __UpperCamelCase = self.BERT(**__A )

        __UpperCamelCase = None
        __UpperCamelCase = None

        # Boolean masks of entity start / end marker positions.
        __UpperCamelCase = W_supports['input_ids'] == start_token_id
        __UpperCamelCase = W_supports['input_ids'] == end_token_id

        # Walk the flat support batch in per-query slices of `size` rows.
        for i, size in enumerate(__A ):
            if i == 0:
                __UpperCamelCase = 0
            else:
                __UpperCamelCase = support_sizes[i - 1]

            __UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]]
            __UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]]

            # Aggregate similarity of query i against support start/end tokens.
            __UpperCamelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            __UpperCamelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )

            if p_starts is not None:
                __UpperCamelCase = torch.vstack((p_starts, p_start) )
                __UpperCamelCase = torch.vstack((p_ends, p_end) )
            else:
                __UpperCamelCase = p_start
                __UpperCamelCase = p_end

        return p_starts, p_ends
53
'''Tests for `datasets.splits.SplitDict` YAML (de)serialisation.

NOTE(review): the obfuscated original referenced undefined names
(`split_dict` with a parameter named `__lowercase`, and a bare `__lowercase`
inside a module-level parametrize list, which raised NameError on import).
The parameter/local names below were restored to the names the bodies
actually read; the two function names are kept as in the original
(`lowercase__`), so the module's public attribute set is unchanged --
note the second definition shadows the first, exactly as before.
'''
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    'split_dict' ,
    [
        SplitDict(),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
        SplitDict({'train': SplitInfo()} ),
    ] ,
)
def lowercase__ ( split_dict : SplitDict ) -> None:
    """A SplitDict must round-trip through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )]
)
def lowercase__ ( split_info : SplitInfo ) -> None:  # noqa: F811 -- name kept from original
    """`asdict` must keep the (deprecated) `dataset_name` key in the dump."""
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
1
'''Logical and arithmetic bit shifts rendered as binary-literal strings.

Each function returns a Python-style binary literal (``"0b..."``).

NOTE(review): the obfuscated original defined all three functions under the
single name ``lowercase__`` with duplicated ``__lowercase`` parameters (a
SyntaxError) while the bodies read ``number``/``shift_amount``.  The names
are restored below; ``lowercase__`` is kept as a backward-compatible alias
for the last definition (the arithmetic right shift), which is what the
original name resolved to.
'''


def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` bits, i.e. append zero bits.

    >>> logical_left_shift(1, 1)
    '0b10'

    Raises:
        ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, filling with zeros.

    >>> logical_right_shift(8, 2)
    '0b10'

    Raises:
        ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        # Every significant bit is shifted out.
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, replicating the sign bit.

    Negative numbers are rendered in two's complement with one leading sign
    bit; positive numbers get a leading '0' sign bit.

    >>> arithmetic_right_shift(-7, 1)
    '0b1100'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])
        # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        # Result saturates to all sign bits.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


# Backward-compatible alias: in the obfuscated original all three functions
# shared the name `lowercase__`, so that name resolved to the last definition.
lowercase__ = arithmetic_right_shift


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
'''Lazy import structure for the BigBird-Pegasus model (obfuscated copy).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> exported names; extended below when torch is
# importable.  NOTE(review): obfuscation renamed this to `a__`, but
# `_import_structure` is read at the bottom of the file -- restore the
# original name before use.
a__ : List[str] ={
    '''configuration_bigbird_pegasus''': [
        '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BigBirdPegasusConfig''',
        '''BigBirdPegasusOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this list was presumably assigned to
    # `_import_structure['modeling_bigbird_pegasus']`; the obfuscated
    # rebinding of `a__` clobbers the dict above.
    a__ : Any =[
        '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BigBirdPegasusForCausalLM''',
        '''BigBirdPegasusForConditionalGeneration''',
        '''BigBirdPegasusForQuestionAnswering''',
        '''BigBirdPegasusForSequenceClassification''',
        '''BigBirdPegasusModel''',
        '''BigBirdPegasusPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy module below
    # is used instead.
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy that imports the
    # submodules on first attribute access.
    a__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. a__ : Optional[Any] =abspath(join(dirname(dirname(dirname(__file__))), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def lowercase__ ( __lowercase : List[str] ) -> Optional[int]: """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main __UpperCamelCase = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(__lowercase , id=__lowercase )
53
'''Kaldi-fbank speech feature extractor (machine-obfuscated copy).'''
# NOTE(review): obfuscation damage throughout -- parameters renamed to `__A`
# (repeated in several signatures: SyntaxError), assignment targets renamed
# to `__UpperCamelCase` while reads keep original names, the base class
# became `__lowerCamelCase` (presumably `SequenceFeatureExtractor`), and
# several methods share the name `_lowerCamelCase`.  `np.floataa`/`np.intaa`
# are garbled dtype names (presumably float32/int32).  Restore before use.
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


a__ : str =logging.get_logger(__name__)


class snake_case ( __lowerCamelCase ):
    '''Extracts log-mel filter-bank features and applies utterance CMVN.'''

    # Names of the model-input tensors this extractor produces.
    SCREAMING_SNAKE_CASE_ : str =["input_features", "attention_mask"]

    def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ):
        super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A )
        __UpperCamelCase = num_mel_bins
        __UpperCamelCase = do_ceptral_normalize
        __UpperCamelCase = normalize_means
        __UpperCamelCase = normalize_vars
        __UpperCamelCase = True

    def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ):
        '''Compute Kaldi fbank features for a single mono waveform.'''
        __UpperCamelCase = waveform * (2**1_5)  # Kaldi compliance: 16-bit signed integers
        __UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 )
        __UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ):
        '''Utterance-level cepstral mean/variance normalisation over the
        first `input_length` frames; pads the remainder with `padding_value`.'''
        # make sure we normalize float32 arrays
        if normalize_means:
            __UpperCamelCase = x[:input_length].mean(axis=0 )
            __UpperCamelCase = np.subtract(__A , __A )
        if normalize_vars:
            __UpperCamelCase = x[:input_length].std(axis=0 )
            __UpperCamelCase = np.divide(__A , __A )

        if input_length < x.shape[0]:
            __UpperCamelCase = padding_value

        # make sure array is in float32
        __UpperCamelCase = x.astype(np.floataa )

        return x

    def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ):
        '''Apply CMVN to each feature array, using the attention mask (when
        given) to find each utterance's true length.'''
        __UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(__A , __A )
        ]

    def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ):
        '''Featurise raw speech: batch-normalise input shape, extract fbank
        features, pad, optionally CMVN-normalise, and convert to tensors.'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        __UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        __UpperCamelCase = is_batched_numpy or (
            isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )

        if is_batched:
            __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__A , np.ndarray ):
            __UpperCamelCase = np.asarray(__A , dtype=np.floataa )
        elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __UpperCamelCase = raw_speech.astype(np.floataa )

        # always return batch
        if not is_batched:
            __UpperCamelCase = [raw_speech]

        # extract fbank features
        __UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech]

        # convert into correct format for padding
        __UpperCamelCase = BatchFeature({'input_features': features} )

        __UpperCamelCase = self.pad(
            __A ,
            padding=__A ,
            max_length=__A ,
            truncation=__A ,
            pad_to_multiple_of=__A ,
            return_attention_mask=__A ,
            **__A ,
        )

        # make sure list is in array format
        __UpperCamelCase = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , __A ):
            __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features]

        __UpperCamelCase = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            __UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            __UpperCamelCase = (
                np.array(__A , dtype=np.intaa )
                if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            __UpperCamelCase = self.normalize(
                padded_inputs['input_features'] , attention_mask=__A )

        if return_tensors is not None:
            __UpperCamelCase = padded_inputs.convert_to_tensors(__A )

        return padded_inputs
53
1
'''AltCLIP model configuration (machine-obfuscated copy).'''
# NOTE(review): obfuscation damage -- all classes are named `snake_case`,
# the base class became `__lowerCamelCase` (presumably `PretrainedConfig`),
# class attributes became `SCREAMING_SNAKE_CASE_` (presumably `model_type`
# etc.), parameters became `__A` (repeated in one signature: SyntaxError),
# and assignment targets became `__UpperCamelCase` while reads keep original
# names (`vocab_size`, `text_config`, ...).  Restore before use.
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a__ : List[Any] =logging.get_logger(__name__)

# Released checkpoints -> hosted config files.
a__ : List[Any] ={
    '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class snake_case ( __lowerCamelCase ):
    '''Configuration for the AltCLIP text tower (XLM-R style encoder).'''

    SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model"

    def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ):
        super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )

        __UpperCamelCase = vocab_size
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = hidden_act
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = hidden_dropout_prob
        __UpperCamelCase = attention_probs_dropout_prob
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = type_vocab_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = position_embedding_type
        __UpperCamelCase = use_cache
        __UpperCamelCase = project_dim


class snake_case ( __lowerCamelCase ):
    '''Configuration for the AltCLIP vision tower (ViT-style encoder).'''

    SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model"

    def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ):
        super().__init__(**__A )

        __UpperCamelCase = hidden_size
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = projection_dim
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = num_channels
        __UpperCamelCase = patch_size
        __UpperCamelCase = image_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = hidden_act

    @classmethod
    def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
        '''from_pretrained-style loader; unwraps the `vision_config` sub-dict
        when pointed at a full AltCLIP config.'''
        cls._set_token_in_kwargs(__A )

        __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            __UpperCamelCase = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(__A , **__A )


class snake_case ( __lowerCamelCase ):
    '''Top-level AltCLIP configuration combining text and vision configs.'''

    SCREAMING_SNAKE_CASE_ : List[str] ="altclip"
    SCREAMING_SNAKE_CASE_ : Optional[int] =True

    def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        __UpperCamelCase = kwargs.pop('text_config_dict' , __A )
        __UpperCamelCase = kwargs.pop('vision_config_dict' , __A )

        super().__init__(**__A )

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                __UpperCamelCase = {}

            # This is the complete result when using `text_config_dict`.
            __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(__A )

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )

        if vision_config_dict is not None:
            if vision_config is None:
                __UpperCamelCase = {}

            # This is the complete result when using `vision_config_dict`.
            __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                __UpperCamelCase = {
                    str(__A ): value for key, value in _vision_config_dict['id2label'].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(__A )

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )

        if text_config is None:
            __UpperCamelCase = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )

        if vision_config is None:
            __UpperCamelCase = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )

        __UpperCamelCase = AltCLIPTextConfig(**__A )
        __UpperCamelCase = AltCLIPVisionConfig(**__A )

        __UpperCamelCase = projection_dim
        __UpperCamelCase = logit_scale_init_value
        __UpperCamelCase = 1.0

    @classmethod
    def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ):
        '''Build a combined config from the two sub-configs.'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )

    def _lowerCamelCase ( self : List[Any] ):
        '''Serialise to a plain dict, expanding the nested sub-configs.'''
        __UpperCamelCase = copy.deepcopy(self.__dict__ )
        __UpperCamelCase = self.text_config.to_dict()
        __UpperCamelCase = self.vision_config.to_dict()
        __UpperCamelCase = self.__class__.model_type
        return output
53
'''AltCLIP model configuration (machine-obfuscated copy; byte-for-byte
duplicate of the previous AltCLIP configuration module in this file).'''
# NOTE(review): obfuscation damage -- all classes are named `snake_case`,
# the base class became `__lowerCamelCase` (presumably `PretrainedConfig`),
# class attributes became `SCREAMING_SNAKE_CASE_` (presumably `model_type`
# etc.), parameters became `__A` (repeated in one signature: SyntaxError),
# and assignment targets became `__UpperCamelCase` while reads keep original
# names (`vocab_size`, `text_config`, ...).  Restore before use.
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a__ : List[Any] =logging.get_logger(__name__)

# Released checkpoints -> hosted config files.
a__ : List[Any] ={
    '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class snake_case ( __lowerCamelCase ):
    '''Configuration for the AltCLIP text tower (XLM-R style encoder).'''

    SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model"

    def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ):
        super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )

        __UpperCamelCase = vocab_size
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = hidden_act
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = hidden_dropout_prob
        __UpperCamelCase = attention_probs_dropout_prob
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = type_vocab_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = position_embedding_type
        __UpperCamelCase = use_cache
        __UpperCamelCase = project_dim


class snake_case ( __lowerCamelCase ):
    '''Configuration for the AltCLIP vision tower (ViT-style encoder).'''

    SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model"

    def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ):
        super().__init__(**__A )

        __UpperCamelCase = hidden_size
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = projection_dim
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = num_channels
        __UpperCamelCase = patch_size
        __UpperCamelCase = image_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = hidden_act

    @classmethod
    def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
        '''from_pretrained-style loader; unwraps the `vision_config` sub-dict
        when pointed at a full AltCLIP config.'''
        cls._set_token_in_kwargs(__A )

        __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            __UpperCamelCase = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(__A , **__A )


class snake_case ( __lowerCamelCase ):
    '''Top-level AltCLIP configuration combining text and vision configs.'''

    SCREAMING_SNAKE_CASE_ : List[str] ="altclip"
    SCREAMING_SNAKE_CASE_ : Optional[int] =True

    def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        __UpperCamelCase = kwargs.pop('text_config_dict' , __A )
        __UpperCamelCase = kwargs.pop('vision_config_dict' , __A )

        super().__init__(**__A )

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                __UpperCamelCase = {}

            # This is the complete result when using `text_config_dict`.
            __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(__A )

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )

        if vision_config_dict is not None:
            if vision_config is None:
                __UpperCamelCase = {}

            # This is the complete result when using `vision_config_dict`.
            __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                __UpperCamelCase = {
                    str(__A ): value for key, value in _vision_config_dict['id2label'].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(__A )

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )

        if text_config is None:
            __UpperCamelCase = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )

        if vision_config is None:
            __UpperCamelCase = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )

        __UpperCamelCase = AltCLIPTextConfig(**__A )
        __UpperCamelCase = AltCLIPVisionConfig(**__A )

        __UpperCamelCase = projection_dim
        __UpperCamelCase = logit_scale_init_value
        __UpperCamelCase = 1.0

    @classmethod
    def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ):
        '''Build a combined config from the two sub-configs.'''
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )

    def _lowerCamelCase ( self : List[Any] ):
        '''Serialise to a plain dict, expanding the nested sub-configs.'''
        __UpperCamelCase = copy.deepcopy(self.__dict__ )
        __UpperCamelCase = self.text_config.to_dict()
        __UpperCamelCase = self.vision_config.to_dict()
        __UpperCamelCase = self.__class__.model_type
        return output
53
1
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : int SCREAMING_SNAKE_CASE_ : TreeNode | None =None SCREAMING_SNAKE_CASE_ : TreeNode | None =None a__ : Optional[int] =namedtuple('''CoinsDistribResult''', '''moves excess''') def lowercase__ ( __lowercase : TreeNode | None ) -> int: """simple docstring""" if root is None: return 0 # Validation def count_nodes(__lowercase : TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(__lowercase : TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(__lowercase ) != count_coins(__lowercase ): raise ValueError('The nodes number should be same as the number of coins' ) # Main calculation def get_distrib(__lowercase : TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) __UpperCamelCase , __UpperCamelCase = get_distrib(node.left ) __UpperCamelCase , __UpperCamelCase = get_distrib(node.right ) __UpperCamelCase = 1 - left_distrib_excess __UpperCamelCase = 1 - right_distrib_excess __UpperCamelCase = ( left_distrib_moves + right_distrib_moves + abs(__lowercase ) + abs(__lowercase ) ) __UpperCamelCase = node.data - coins_to_left - coins_to_right return CoinsDistribResult(__lowercase , __lowercase ) return get_distrib(__lowercase )[0] if __name__ == "__main__": import doctest doctest.testmod()
53
'''simple docstring'''
import argparse
import json
import os

import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original LUKE research checkpoint into a HF ``LukeModel``
    plus ``LukeTokenizer`` saved under ``pytorch_dump_folder_path``.

    BUG FIXES vs. the obfuscated original:
      * every parameter was named ``__lowercase`` (duplicate argument names are
        a SyntaxError); names are restored from the ``__main__`` call site;
      * the function was defined as ``lowercase__`` while callers reference
        ``convert_luke_checkpoint`` / ``load_entity_vocab`` (NameError);
      * the entity-hidden-state shape check used ``!=`` and therefore raised
        exactly when the shape was correct (see below).
    """
    # Load configuration metadata for the checkpoint.
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    # NOTE(review): flag value restored from the upstream HF conversion script.
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_two = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_one, entity_token_two]})
    config.vocab_size += 2

    print(f'''Saving tokenizer to {pytorch_dump_folder_path}''')
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from '@' and '#'.
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'])[0]].unsqueeze(0)
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'])[0]].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism.
    # NOTE(review): the w2e_/e2w_/e2e_ target key names were erased by the
    # obfuscation and are restored from the upstream HF conversion script.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'''Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids''')
    if not (all(key.startswith('entity_predictions') or key.startswith('lm_head') for key in unexpected_keys)):
        raise ValueError(
            'Unexpected keys'
            f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}'''
        )

    # Check outputs against reference values computed with the original model.
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')

    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors='pt')

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}'''
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # BUG FIX: the original tested ``!=`` here, raising exactly when the shape
    # matched; flipped to ``==`` to mirror the word-hidden-state check above.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            f''' {expected_shape}'''
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a TSV entity vocabulary (``<title>\\t<id>`` per line) into a dict
    mapping entity title -> line index."""
    entity_vocab = {}
    with open(entity_vocab_path, 'r', encoding='utf-8') as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split('\t')
            entity_vocab[title] = index
    return entity_vocab


# Backward-compatible alias: after the original's two shadowing
# ``def lowercase__`` statements, this name resolved to the vocab loader.
lowercase__ = load_entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
53
1
'''simple docstring'''
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place)."""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        # BUG FIX: the obfuscated original called
        # ``state_dict.pop(__lowercase, __lowercase)`` — popping its own
        # argument instead of each ignore key.
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing the embedding's weight tensor
    (used as a tied lm_head for fine-tuned checkpoints)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    """Convert a fairseq mBART checkpoint into ``MBartForConditionalGeneration``.

    BUG FIXES vs. the obfuscated original: all four parameters were named
    ``__lowercase`` (duplicate argument names are a SyntaxError); names are
    restored from the ``__main__`` call site and the body's own references
    (``mbart_aa``, ``finetuned``).
    """
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        # mBART-50 fine-tuned checkpoints use ReLU activations.
        mbart_config.activation_function = 'relu'
    # NOTE(review): assignment target restored from the upstream conversion
    # script — the shared embedding is tied to the decoder embedding.
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model


# Backward-compatible alias: the original's repeated ``def lowercase__``
# definitions left this name bound to the conversion entry point.
lowercase__ = convert_fairseq_mbart_checkpoint_from_disk


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
    )
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--hf_config''',
        default='''facebook/mbart-large-cc25''',
        type=str,
        help='''Which huggingface architecture to use: mbart-large''',
    )
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
    parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    # BUG FIX: ``--mbart_50`` populates ``args.mbart_50``; the original read
    # the nonexistent ``args.mbart_aa``.
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
53
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


# NOTE(review): this block is machine-obfuscated and is NOT runnable as-is:
#   * the base class ``__lowerCamelCase`` is never defined (presumably the
#     imported-but-unused ``TestCase`` — confirm before repairing);
#   * every method is named ``_lowerCamelCase``, so each definition shadows the
#     previous one and only the last method survives on the class;
#   * ``__UpperCamelCase = ...`` stands where the original bound names — both
#     locals (``dataset``, ``retriever``, ``out`` ...) and attributes that ARE
#     read later (``self.tmpdirname``, ``self.retrieval_vector_size``,
#     ``self.vocab_file``, ``self.merges_file``);
#   * ``__A`` arguments and ``np.floataa`` are obfuscation residue.
# Code is reproduced unchanged below; only comments were added.
@require_faiss
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # setUp: builds a temp dir holding a tiny DPR (WordPiece) vocab and a tiny
    # BART (BPE) vocab/merges pair for the tokenizer factories below.
    def _lowerCamelCase ( self : Any ):
        __UpperCamelCase = tempfile.mkdtemp()
        __UpperCamelCase = 8  # read back later as ``self.retrieval_vector_size``
        # DPR tok
        __UpperCamelCase = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        __UpperCamelCase = os.path.join(self.tmpdirname, 'dpr_tokenizer')
        os.makedirs(__A, exist_ok=__A)
        __UpperCamelCase = os.path.join(__A, DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        # BART tok
        __UpperCamelCase = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low',
            'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>',
        ]
        __UpperCamelCase = dict(zip(__A, range(len(__A))))
        __UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __UpperCamelCase = {'unk_token': '<unk>'}
        __UpperCamelCase = os.path.join(self.tmpdirname, 'bart_tokenizer')
        os.makedirs(__A, exist_ok=__A)
        __UpperCamelCase = os.path.join(__A, BART_VOCAB_FILES_NAMES['vocab_file'])
        __UpperCamelCase = os.path.join(__A, BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(__A) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(__A))

    # Factory: DPR question-encoder tokenizer backed by the temp vocab.
    def _lowerCamelCase ( self : Tuple ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    # Factory: DPR context-encoder tokenizer backed by the same temp vocab.
    def _lowerCamelCase ( self : Optional[int] ):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    # Factory: BART tokenizer backed by the temp vocab/merges files.
    def _lowerCamelCase ( self : Union[str, Any] ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'bart_tokenizer'))

    # tearDown: removes the temp dir created in setUp.
    def _lowerCamelCase ( self : str ):
        shutil.rmtree(self.tmpdirname)

    # Two-row dummy dataset with a faiss inner-product index on 'embeddings'
    # (row '1' carries the larger-norm embedding, so it ranks first for a
    # positive query vector).
    def _lowerCamelCase ( self : Dict ):
        __UpperCamelCase = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    # RagRetriever over the canonical HF index; ``load_dataset`` is patched so
    # the dummy dataset above is served instead of a hub download.
    def _lowerCamelCase ( self : Tuple ):
        __UpperCamelCase = self.get_dummy_dataset()
        __UpperCamelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
            __UpperCamelCase = dataset  # presumably ``mock_load_dataset.return_value`` — confirm
            __UpperCamelCase = RagRetriever(
                __A,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    # RagRetriever over a CustomHFIndex, either persisted to disk first
    # (index + dataset saved under the temp dir) or built in memory.
    def _lowerCamelCase ( self : Any , __A : bool ):
        __UpperCamelCase = self.get_dummy_dataset()
        __UpperCamelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name='custom',
        )
        if from_disk:
            __UpperCamelCase = os.path.join(self.tmpdirname, 'dataset')
            __UpperCamelCase = os.path.join(self.tmpdirname, 'index.faiss')
            dataset.get_index('embeddings').save(os.path.join(self.tmpdirname, 'index.faiss'))
            dataset.drop_index('embeddings')
            dataset.save_to_disk(os.path.join(self.tmpdirname, 'dataset'))
            del dataset
            __UpperCamelCase = RagRetriever(
                __A,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            __UpperCamelCase = RagRetriever(
                __A,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, __A),
            )
        return retriever

    # RagRetriever over the legacy index layout: a serialized faiss index plus
    # pickled id metadata and passages written into the temp dir.
    def _lowerCamelCase ( self : int ):
        __UpperCamelCase = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)
        __UpperCamelCase = os.path.join(self.tmpdirname, 'hf_bert_base.hnswSQ8_correct_phi_128.c_index')
        dataset.save_faiss_index('embeddings', index_file_name + '.index.dpr')
        pickle.dump(dataset['id'], open(index_file_name + '.index_meta.dpr', 'wb'))
        __UpperCamelCase = os.path.join(self.tmpdirname, 'psgs_w100.tsv.pkl')
        __UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(__A, open(__A, 'wb'))
        __UpperCamelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name='legacy',
            index_path=self.tmpdirname,
        )
        __UpperCamelCase = RagRetriever(
            __A, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    # Retrieval via the canonical HF index: checks embed shape, doc dict keys,
    # and inner-product ordering for a (+1s, -1s) query pair.
    def _lowerCamelCase ( self : List[str] ):
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A, n_docs=__A)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(__A), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']), __A)
        self.assertEqual(doc_dicts[0]['id'][0], '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0], '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Round-trip save_pretrained/from_pretrained for the canonical retriever,
    # then a smoke retrieve on the reloaded instance.
    def _lowerCamelCase ( self : Optional[Any] ):
        __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
                __UpperCamelCase = self.get_dummy_dataset()
                retriever.save_pretrained(__A)
                __UpperCamelCase = RagRetriever.from_pretrained(__A)
            self.assertIsInstance(__A, __A)
            __UpperCamelCase = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            __UpperCamelCase = retriever.retrieve(__A, n_docs=1)
            self.assertTrue(out is not None)

    # Retrieval via the in-memory CustomHFIndex (same checks as canonical).
    def _lowerCamelCase ( self : Optional[int] ):
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A)
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A, n_docs=__A)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(__A), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']), __A)
        self.assertEqual(doc_dicts[0]['id'][0], '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0], '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Round-trip save/load for the in-memory CustomHFIndex retriever.
    def _lowerCamelCase ( self : str ):
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(__A)
            __UpperCamelCase = RagRetriever.from_pretrained(__A)
            self.assertIsInstance(__A, __A)
            __UpperCamelCase = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            __UpperCamelCase = retriever.retrieve(__A, n_docs=1)
            self.assertTrue(out is not None)

    # Retrieval via the on-disk CustomHFIndex (same checks again).
    def _lowerCamelCase ( self : Optional[Any] ):
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A)
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A, n_docs=__A)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(__A), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']), __A)
        self.assertEqual(doc_dicts[0]['id'][0], '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0], '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Round-trip save/load for the on-disk CustomHFIndex retriever.
    def _lowerCamelCase ( self : Any ):
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(__A)
            __UpperCamelCase = RagRetriever.from_pretrained(__A)
            self.assertIsInstance(__A, __A)
            __UpperCamelCase = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            __UpperCamelCase = retriever.retrieve(__A, n_docs=1)
            self.assertTrue(out is not None)

    # Retrieval via the legacy index: doc dicts expose only 'text'/'title'.
    def _lowerCamelCase ( self : Dict ):
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_legacy_index_retriever()
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A, n_docs=__A)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(__A), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['text', 'title'])
        self.assertEqual(len(doc_dicts[0]['text']), __A)
        self.assertEqual(doc_dicts[0]['text'][0], 'bar')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['text'][0], 'foo')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    # Round-trip save/load for the legacy-index retriever.
    def _lowerCamelCase ( self : str ):
        __UpperCamelCase = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(__A)
            __UpperCamelCase = RagRetriever.from_pretrained(__A)
            self.assertIsInstance(__A, __A)
            __UpperCamelCase = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
            )
            __UpperCamelCase = retriever.retrieve(__A, n_docs=1)
            self.assertTrue(out is not None)

    # Calling the retriever directly (__call__): numpy outputs by default,
    # torch tensors when return_tensors='pt' is requested.
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowerCamelCase ( self : Optional[Any] ):
        import torch

        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
        __UpperCamelCase = [[5, 7], [1_0, 1_1]]
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        __UpperCamelCase = retriever(__A, __A, prefix=retriever.config.generator.prefix, n_docs=__A)
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(__A, __A)
        self.assertIsInstance(__A, __A)
        self.assertIsInstance(__A, np.ndarray)
        __UpperCamelCase = retriever(
            __A,
            __A,
            prefix=retriever.config.generator.prefix,
            n_docs=__A,
            return_tensors='pt',
        )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (  # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(__A, torch.Tensor)
        self.assertIsInstance(__A, torch.Tensor)
        self.assertIsInstance(__A, torch.Tensor)

    # With a context-encoder tokenizer attached, the retriever output also
    # carries tokenized documents (6 entries total).
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowerCamelCase ( self : Any ):
        __UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer()
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A)
        retriever.set_ctx_encoder_tokenizer(__A)
        __UpperCamelCase = [[5, 7], [1_0, 1_1]]
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.floataa
        )
        __UpperCamelCase = retriever(__A, __A, prefix=retriever.config.generator.prefix, n_docs=__A)
        self.assertEqual(
            len(__A), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask')), __A
        )  # check for doc token related keys in dictionary.
53
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union

from packaging import version

from ..utils import is_torch_available, logging


if is_torch_available():
    import torch

# NOTE(review): ``List`` is not imported, so this evaluated annotation raises
# NameError at import time (only ``Any``/``Dict``/``Union`` are imported).
a__ : List[Any] =logging.get_logger(__name__)


# NOTE(review): this block is machine-obfuscated and NOT runnable as written:
#   * ``__init__`` declares every parameter as ``__A`` — duplicate argument
#     names are a SyntaxError;
#   * the body reads names (``load_in_abit``, ``llm_inta_threshold``, ...)
#     that no surviving parameter provides, and ``__UpperCamelCase = ...``
#     stands where the original assigned the matching ``self.*`` attributes;
#   * the 8-bit and 4-bit flags were both collapsed into ``load_in_abit``, so
#     branches such as ``self.load_in_abit or self.load_in_abit`` and the
#     quant-method chain below lost their original distinction;
#   * every method is named ``_lowerCamelCase``, so each definition shadows
#     the previous one (``self.post_init()``, ``self.to_dict()``,
#     ``self.to_diff_dict()`` and ``BitsAndBytesConfig()`` have no targets
#     in this file).
# Code reproduced unchanged; only comments added.
@dataclass
class snake_case :
    """simple docstring"""

    # Constructor: records quantization settings and resolves the 4-bit
    # compute dtype (default torch dtype / name string / torch.dtype).
    def __init__( self : List[Any] , __A : str=False , __A : Dict=False , __A : Any=6.0 , __A : Optional[Any]=None , __A : Any=False , __A : int=False , __A : Dict=None , __A : Dict="fp4" , __A : Union[str, Any]=False , **__A : List[str] , ):
        __UpperCamelCase = load_in_abit
        __UpperCamelCase = load_in_abit
        __UpperCamelCase = llm_inta_threshold
        __UpperCamelCase = llm_inta_skip_modules
        __UpperCamelCase = llm_inta_enable_fpaa_cpu_offload
        __UpperCamelCase = llm_inta_has_fpaa_weight
        __UpperCamelCase = bnb_abit_quant_type
        __UpperCamelCase = bnb_abit_use_double_quant
        if bnb_abit_compute_dtype is None:
            __UpperCamelCase = torch.floataa  # obfuscated dtype name — presumably torch.float32; confirm
        elif isinstance(__A , __A ):
            # A string such as "bfloat16": resolve it as an attribute of torch.
            __UpperCamelCase = getattr(__A , __A )
        elif isinstance(__A , torch.dtype ):
            __UpperCamelCase = bnb_abit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
        self.post_init()

    # post_init: validates the types of every stored setting and enforces the
    # minimum bitsandbytes version for 4-bit quantization.
    def _lowerCamelCase ( self : int ):
        if not isinstance(self.llm_inta_threshold , __A ):
            raise ValueError('llm_int8_threshold must be a float' )
        if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __A ):
            raise ValueError('llm_int8_skip_modules must be a list of strings' )
        if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __A ):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
        if not isinstance(self.llm_inta_has_fpaa_weight , __A ):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
        if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
        if not isinstance(self.bnb_abit_quant_type , __A ):
            raise ValueError('bnb_4bit_quant_type must be a string' )
        if not isinstance(self.bnb_abit_use_double_quant , __A ):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
        if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
            '0.39.0'
        ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version'
            )

    # is_quantizable: true when either quantization mode is enabled.
    def _lowerCamelCase ( self : str ):
        return self.load_in_abit or self.load_in_abit

    # quant_method: maps the enabled flags/quant type to a method name string.
    def _lowerCamelCase ( self : Any ):
        if self.load_in_abit:
            return "llm_int8"
        elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    # from_dict-style constructor: builds a config from kwargs, moving any
    # recognised keys onto the instance and optionally returning the rest.
    @classmethod
    def _lowerCamelCase ( cls : Dict , __A : Tuple , __A : Dict , **__A : List[Any] ):
        __UpperCamelCase = cls(**__A )
        __UpperCamelCase = []
        for key, value in kwargs.items():
            if hasattr(__A , __A ):
                setattr(__A , __A , __A )
                to_remove.append(__A )
        for key in to_remove:
            kwargs.pop(__A , __A )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    # to_json_file: serializes to_dict() as pretty-printed JSON.
    def _lowerCamelCase ( self : Optional[Any] , __A : Union[str, os.PathLike] ):
        with open(__A , 'w' , encoding='utf-8' ) as writer:
            __UpperCamelCase = self.to_dict()
            __UpperCamelCase = json.dumps(__A , indent=2 , sort_keys=__A ) + '\n'
            writer.write(__A )

    # to_dict: deep-copies __dict__ and stringifies the compute dtype as its
    # bare name (e.g. "float32" from "torch.float32").
    def _lowerCamelCase ( self : Optional[int] ):
        __UpperCamelCase = copy.deepcopy(self.__dict__ )
        __UpperCamelCase = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
        return output

    def __repr__( self : Any ):
        return f'''{self.__class__.__name__} {self.to_json_string()}'''

    # to_json_string: full dict or diff-vs-default dict as JSON.
    def _lowerCamelCase ( self : List[str] , __A : bool = True ):
        if use_diff is True:
            __UpperCamelCase = self.to_diff_dict()
        else:
            __UpperCamelCase = self.to_dict()
        return json.dumps(__A , indent=2 , sort_keys=__A ) + "\n"

    # to_diff_dict: keeps only the entries that differ from a default config.
    def _lowerCamelCase ( self : Union[str, Any] ):
        __UpperCamelCase = self.to_dict()
        # get the default config dict
        __UpperCamelCase = BitsAndBytesConfig().to_dict()
        __UpperCamelCase = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                __UpperCamelCase = value
        return serializable_config_dict
53
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public symbols, consumed by ``_LazyModule``.
# BUG FIX: the original annotated these assignments with ``List[Any]`` /
# ``Optional[int]`` without importing them, so the evaluated annotations
# raised NameError at import time; the bogus annotations are dropped.
a__ = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
# BUG FIX: the lazy-import call at the bottom references ``_import_structure``
# but the obfuscated original only ever bound the registry to ``a__``
# (NameError on import); bind both names to the same dict.
_import_structure = a__

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
    # BUG FIX: the original rebound ``a__`` to this list without ever
    # registering it, so the torch-gated names were not importable lazily;
    # register them under the modeling submodule key (mirroring the
    # ``from .modeling_timesformer import ...`` branch below).
    _import_structure['modeling_timesformer'] = a__

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace eager imports with a lazy module proxy driven by the registry.
    a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


# BUG FIX: the original wrote ``a__ : Tuple = '3'`` — ``Tuple`` is never
# imported, so evaluating the annotation raised NameError on import; the
# annotation is dropped, the value kept for backward compatibility.
# NOTE(review): ``os`` is imported but unused — the '3' looks like a lost
# ``os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'`` assignment; confirm upstream
# before restoring it.
a__ = '''3'''

print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    # torch not installed: report its absence in the same format.
    print('''Torch version:''', None)

try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
53
"""Shared helpers for the `datasets` test suite.

Provides boolean env-flag parsing, pytest skip markers and decorators for
optional dependencies, offline-network simulation, temporary working
directories, Arrow memory assertions and async subprocess execution.

Fix: the module's identifiers were machine-mangled — every function was named
`lowercase__` (each shadowing the previous), parameters were duplicated
(`__lowercase`, a SyntaxError) and bodies read names that no longer existed
(`key`, `_value`, `test_case`, ...), while call sites used the real names
(`parse_flag_from_env`, `_read_stream`, ...). Coherent names are restored;
all runtime strings are unchanged.
"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean environment variable *key*, falling back to *default* when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip *test_case* unless faiss is installed."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip *test_case* unless regex is installed."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip *test_case* unless elasticsearch is installed."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip *test_case* unless sqlalchemy is installed."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip *test_case* unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip *test_case* unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip *test_case* unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip *test_case* unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip *test_case* unless transformers is installed."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip *test_case* unless tiktoken is installed."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip *test_case* unless spacy is installed."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip unless spacy *and* the given *model* are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip *test_case* unless pyspark is installed."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip *test_case* unless joblibspark is installed."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip *test_case* unless RUN_SLOW is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip *test_case* unless RUN_LOCAL is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip *test_case* unless RUN_PACKAGED is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip *test_case* unless RUN_REMOTE is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator applying every decorator to every `test*` method."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    """Raised when a simulated-offline request is made without a timeout."""

    pass


class OfflineSimulationMode(Enum):
    """How `offline()` should simulate the absence of a network."""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment inside the context, per *mode*."""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the context body inside a fresh temp dir, restoring the cwd afterwards."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow-allocated memory grew while the context body ran."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow-allocated memory did not grow while the context body ran."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    """Compare two numpy Generators without advancing them (deepcopy first)."""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """Mark the test as xfail when the wrapped call raises an HTTP 500/502."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    """Result of an async subprocess run: returncode plus captured stdout/stderr lines."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """Feed each line of *stream* to *callback* until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run *cmd*, teeing its stdout/stderr live while also capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd* to completion, raising RuntimeError on failure or empty output."""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 when not distributed)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port unique to this pytest-xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
53
1
"""Compute terms of Sylvester's sequence: a(1) = 2, a(n) = a(n-1)**2 - a(n-1) + 1."""


def lowercase__ ( __lowercase : int ) -> int:
    """Return the ``__lowercase``-th term of Sylvester's sequence.

    Fixes over the previous version: the isinstance check passed the argument
    as both operands (always a TypeError), and both the recursive call and the
    internal names (`sylvester`, `number`, `num`) were undefined (NameError).
    The recurrence is now computed iteratively, which also avoids deep stacks.

    Raises:
        AssertionError: if the input is not an int.
        ValueError: if the input is < 1.
    """
    assert isinstance(__lowercase, int), f"The input value of [n={__lowercase}] is not an integer"
    if __lowercase < 1:
        raise ValueError(f"The input value of [n={__lowercase}] has to be > 0")
    term = 2  # a(1)
    for _ in range(__lowercase - 1):
        # a(n) = (a(n-1) - 1) * a(n-1) + 1
        term = (term - 1) * term + 1
    return term


if __name__ == "__main__":
    # Fixed: previously called the undefined name `sylvester`.
    print(f"The 8th number in Sylvester's sequence: {lowercase__(8)}")
53
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a__ : Tuple ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
53
1
'''Constants used when converting documentation pages into runnable notebooks.'''

# Cell prepended to every generated notebook so `transformers` is installed.
a__ : str ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# NOTE(review): `INSTALL_CONTENT` is undefined here — the string above is bound
# to `a__`, so this line raises NameError at import time; confirm the intended
# variable name.
a__ : Tuple =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]

# Placeholder class names substituted into doc templates when rendering.
a__ : Dict ={
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
53
"""Convert a native T5X (flax) checkpoint into a PyTorch (U)MT5 checkpoint.

NOTE(review): identifiers in this file appear machine-mangled — every function
is named `lowercase__` (so only the last binding survives, while the bodies
call the real names `tax_*`, `convert_tax_to_pytorch`, ...), parameters are
duplicated (`__lowercase`, a SyntaxError), assignment targets were collapsed
to `__UpperCamelCase` (losing the state-dict keys being written), and
`from tax import checkpoints` was presumably `from t5x import checkpoints`.
The file cannot run as-is; the original names need restoring from upstream.
Code below is kept byte-identical; only comments/docstrings were changed.
"""
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()


def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple:
    """Look up the relative-position-bias embedding of layer *i* (reads `params`/`prefix`/`i` — names currently undefined)."""
    return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]


def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]:
    """Extract and flatten the k/o/q/v attention kernels of layer *i*."""
    # Each kernel is reshaped from (d, heads, head_dim) to a 2-D matrix.
    __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
    __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
    __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
    __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
    __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v


def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]:
    """Extract the MLP kernels of layer *i*; v1.1 checkpoints have a gated GeLU with wi_0/wi_1."""
    if split_mlp_wi:
        __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        __UpperCamelCase = (wi_a, wi_a)
    else:
        __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo


def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str:
    """Look up a layer-norm scale vector of layer *i*."""
    return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]


def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]:
    """Convert the flattened T5X parameter dict to PyTorch-style names/shapes.

    NOTE(review): the assignments below originally wrote into the `new`
    OrderedDict under explicit state-dict keys; those keys were lost in the
    mangling (`__UpperCamelCase = k.T`), so the mapping must be restored from
    the upstream script before this can work.
    """
    __UpperCamelCase = traverse_util.flatten_dict(variables['target'] )
    __UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:' , __lowercase )

    __UpperCamelCase = collections.OrderedDict()

    # Shared embeddings.
    __UpperCamelCase = old['token_embedder/embedding']

    # Encoder.
    for i in range(__lowercase ):
        # Block i, layer 0 (Self Attention).
        __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' )
        __UpperCamelCase = layer_norm
        __UpperCamelCase = k.T
        __UpperCamelCase = o.T
        __UpperCamelCase = q.T
        __UpperCamelCase = v.T

        # Block i, layer 1 (MLP).
        __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' )
        __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase )
        __UpperCamelCase = layer_norm
        if split_mlp_wi:
            __UpperCamelCase = wi[0].T
            __UpperCamelCase = wi[1].T
        else:
            __UpperCamelCase = wi.T
        __UpperCamelCase = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            __UpperCamelCase = tax_relpos_bias_lookup(
                __lowercase , __lowercase , 'encoder' ).T

    __UpperCamelCase = old['encoder/encoder_norm/scale']

    if not scalable_attention:
        # Non-scalable models share a single rel_embedding per stack (layer 0).
        __UpperCamelCase = tax_relpos_bias_lookup(
            __lowercase , 0 , 'encoder' ).T
        __UpperCamelCase = tax_relpos_bias_lookup(
            __lowercase , 0 , 'decoder' ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(__lowercase ):
            # Block i, layer 0 (Self Attention).
            __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' )
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' )
            __UpperCamelCase = layer_norm
            __UpperCamelCase = k.T
            __UpperCamelCase = o.T
            __UpperCamelCase = q.T
            __UpperCamelCase = v.T

            # Block i, layer 1 (Cross Attention).
            __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' )
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' )
            __UpperCamelCase = layer_norm
            __UpperCamelCase = k.T
            __UpperCamelCase = o.T
            __UpperCamelCase = q.T
            __UpperCamelCase = v.T

            # Block i, layer 2 (MLP).
            __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' )
            __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase )
            __UpperCamelCase = layer_norm
            if split_mlp_wi:
                __UpperCamelCase = wi[0].T
                __UpperCamelCase = wi[1].T
            else:
                __UpperCamelCase = wi.T
            __UpperCamelCase = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                __UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T

        __UpperCamelCase = old['decoder/decoder_norm/scale']

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            __UpperCamelCase = old['decoder/logits_dense/kernel'].T

    return new


def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int:
    """Wrap converted numpy arrays as torch tensors and fill in tied weights."""
    __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        __UpperCamelCase = state_dict['shared.weight']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            __UpperCamelCase = state_dict['shared.weight']

        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            __UpperCamelCase = state_dict['shared.weight']

    return state_dict


def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]:
    """Load a T5X checkpoint, convert it, and load the weights into *model*."""
    __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase )
    __UpperCamelCase = convert_tax_to_pytorch(
        __lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase )
    __UpperCamelCase = make_state_dict(__lowercase , __lowercase )
    model.load_state_dict(__lowercase , strict=__lowercase )


def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]:
    """End-to-end conversion: build the model from config, load T5X weights, save."""
    __UpperCamelCase = MTaConfig.from_json_file(__lowercase )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        __UpperCamelCase = UMTaEncoderModel(__lowercase )
    else:
        __UpperCamelCase = UMTaForConditionalGeneration(__lowercase )

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(__lowercase )

    # Verify that we can load the checkpoint.
    model.from_pretrained(__lowercase )
    print('Done' )


if __name__ == "__main__":
    a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
    )
    parser.add_argument(
        '''--scalable_attention''',
        action='''store_true''',
        help='''Whether the model uses scaled attention (umt5 model)''',
        default=False,
    )
    a__ : List[str] =parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.tax_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
53
1
"""Count reduced proper fractions with denominator <= limit (Project Euler 72) via a totient sieve."""


def lowercase__ ( __lowercase : int = 1000000 ) -> int:
    """Return the sum of Euler's totient phi(k) for 2 <= k <= ``__lowercase``.

    Sieve: initialise phi[i] = i - 1 (exact for primes); whenever phi[i] is
    still i - 1, i is prime, so subtract phi[j] // i from every multiple j.

    Fixes over the previous version: the body read undefined names
    (`limit`, `phi` were never bound because assignments were mangled).
    """
    limit = __lowercase
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # phi[i] untouched so far => i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    # Fixed: the guard previously called `solution()`, a name that does not
    # exist in this module.
    print(lowercase__())
53
"""Processor that bundles a BLIP image processor and a BERT tokenizer.

Fix: the class was machine-mangled — duplicate ``__A`` parameters (a
SyntaxError), an undefined base class name, all three ProcessorMixin class
attributes bound to the same mangled identifier, and method bodies reading
names that no longer existed. Names are restored to the ones the bodies and
the ProcessorMixin machinery require; runtime strings are unchanged.
"""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case ( ProcessorMixin ):
    """Wraps a BlipImageProcessor and a Bert tokenizer into a single processor."""

    # Names required by ProcessorMixin to wire up the two sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP models do not consume token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare images and/or text for the model; at least one must be given."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, de-duplicated in order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
53
1
"""Complete computer-vision training example for Hugging Face Accelerate.

Fine-tunes the classifier head of a timm ``resnet50d`` on pet images found in
``--data_dir`` (labels are parsed from the file names, e.g. ``beagle_32.jpg``),
with optional experiment tracking and checkpoint/resume support.
"""
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    """Return the class label encoded in a file name like ``beagle_32.jpg``."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    """Dataset of pet images whose label is derived from the file name.

    Args:
        file_names: paths of the ``.jpg`` images.
        image_transform: optional torchvision transform applied to each image.
        label_to_id: optional mapping from string label to integer id.
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        # Some inputs may be grayscale/RGBA; normalize to 3-channel RGB.
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    """Run the full train/eval loop described by ``config`` and CLI ``args``."""
    # Initialize accelerator (attach trackers only when tracking is requested).
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [
        os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")
    ]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset(
        [file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id
    )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the
    # optimizer creation otherwise training will not work on TPU (`accelerate` will kindly throw an error
    # to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model: only the classifier head is trained.
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(
        optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave
    # them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        # NOTE: this condition is always True when the outer test passed; kept
        # to mirror the original control flow (the else branch picks the most
        # recent checkpoint folder when an empty value slips through).
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse CLI arguments and launch the training function."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
53
"""Singly linked list node with loop (cycle) detection."""
from typing import Any


class ContainsLoopError(Exception):
    """Raised while iterating a list whose ``next_node`` chain revisits a node."""


class Node:
    """A singly linked list node holding ``data`` and an optional successor."""

    def __init__(self, data: Any) -> None:
        self.data = data
        # Successor node; None terminates the list.
        self.next_node = None

    def __iter__(self):
        """Yield the data of each node; raise ContainsLoopError on a cycle."""
        node = self
        # Track identities of nodes already seen. A set gives O(1) membership
        # tests (the previous list-based version was O(n) per node, O(n^2)
        # overall); object identity matches the default `in`-on-list behaviour
        # since Node does not define __eq__.
        visited = set()
        while node:
            if id(node) in visited:
                raise ContainsLoopError
            visited.add(id(node))
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True when following ``next_node`` pointers revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False (equal data, distinct nodes)

    root_node = Node(1)
    print(root_node.has_loop)  # False
53
1
"""Modular exponentiation in O(log n) multiplications (binary exponentiation)."""


def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Return ``(a ** n) % mod`` computed by repeated squaring.

    Args:
        a: base.
        n: non-negative integer exponent.
        mod: modulus (> 0).
    """
    if n == 0:
        return 1
    if n % 2 == 1:
        # Odd exponent: peel off one factor of ``a``.
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    # Even exponent: square the half power. ``//`` keeps the exponent an int
    # (the original used ``/``, which degrades to float and loses precision
    # for large exponents).
    b = binary_exponentiation(a, n // 2, mod)
    return (b * b) % mod


if __name__ == "__main__":
    # Demonstrate modular division via Fermat's little theorem:
    # a / b == a * b^(p-2) (mod p) for prime p.
    p = 701  # a prime number
    a = 1_000_000_000
    b = 10

    # using binary exponentiation function, O(log(p)):
    print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
    print((a / b) % p == (a * b ** (p - 2)) % p)
53
"""Rabin-Karp substring search using a rolling polynomial hash."""

alphabet_size = 256  # base of the polynomial hash (one digit per byte value)
modulus = 1_000_003  # large prime keeping hash values bounded


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs as a substring of ``text``.

    Candidate positions are filtered by a rolling hash and confirmed by a
    direct string comparison, so collisions never produce false positives.
    Expected running time is O(len(text) + len(pattern)).
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # Weight of the highest-order character, needed to roll the window.
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Smoke tests covering matches, misses and non-ASCII input."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
53
1
"""Deprecated entry point kept for backward compatibility.

Importing this module only re-exports the pipeline from ``diffusers`` and
emits a deprecation warning pointing users at the library import.
"""
import warnings

# Re-exported for callers that still import from this script.
# NOTE: the class is named StableDiffusionImg2ImgPipeline in diffusers; the
# previous spelling (StableDiffusionImgaImgPipeline) does not exist and made
# this shim fail with ImportError — the warning text below already uses the
# correct name.
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
53
"""A small pure-Python matrix class with the usual linear algebra operations."""


class Matrix:
    """A 2-D matrix of ints/floats supporting arithmetic, determinant and inverse.

    ``rows`` must be a (possibly empty) list of equal-length, non-empty lists
    whose entries are ints or floats; anything else raises TypeError.
    """

    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the columns as a list of lists (i.e. the transpose's rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) shape tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Return the identity matrix with the same number of rows."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Return the determinant (0 for non-square matrices)."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])
            )
        # Laplace expansion along the first row.
        return sum(
            self.rows[0][column] * self.cofactors().rows[0][column]
            for column in range(self.num_columns)
        )

    def is_invertable(self):
        """True when the determinant is non-zero (name kept for compatibility)."""
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix obtained by deleting ``row`` and ``column``."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        """Matrix of all minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        """Matrix of all signed minors (cofactors)."""
        # Compute the minors matrix once: the previous version re-ran the full
        # minors() computation for every single element of the result.
        minors = self.minors()
        return Matrix(
            [
                [
                    minors.rows[row][column]
                    if (row + column) % 2 == 0
                    else minors.rows[row][column] * -1
                    for column in range(minors.num_columns)
                ]
                for row in range(minors.num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        """Return the inverse; raise TypeError when the determinant is zero.

        Note: entries are truncated to int by ``__mul__`` (scalar branch),
        matching the original behaviour of this class.
        """
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append ``row`` (or insert it before index ``position``)."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append ``column`` (or insert it before index ``position``)."""
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            # NOTE: scalar products are truncated to int (original behaviour,
            # relied on by inverse()).
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        """Sum of element-wise products of two equal-length vectors."""
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
1
"""Rabin-Karp substring search (rolling polynomial hash over byte values)."""

alphabet_size = 256  # hash base: one digit per possible character value
modulus = 1_000_003  # large prime modulus bounding hash values


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` is a substring of ``text``.

    A rolling hash filters candidate windows; each hash match is verified by
    direct comparison, so collisions cannot yield false positives.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Smoke tests covering matches, misses and non-ASCII input."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
53
"""Shrink an ONNX model by de-duplicating identical initializer tensors.

Duplicated initializers are removed and every node input that referenced a
duplicate is rewired to the surviving copy. The optimized model is saved
next to the original with an ``optimized_`` prefix.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names.

    The names are temporarily blanked so the protobuf equality check only
    considers dims/type/data, then restored.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called ``name`` on ``node_proto`` with ``new_name``.

    Recurses into the subgraphs of If/Loop nodes, which carry their own
    graphs as attributes.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # repeated proto fields have no item assignment; insert + pop swaps in place.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of ``graph_proto``."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire their consumers.

    ``ind_to_replace`` holds (duplicate_index, reference_index) pairs with
    duplicate_index > reference_index; the duplicate is removed and all uses
    are redirected to the reference tensor's name.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """De-duplicate initializers of the model at ``onnx_file_path``.

    Returns the path of the optimized model (``optimized_<name>`` in the
    same directory).
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    # O(n^2) pairwise scan; each tensor already marked as a duplicate is skipped.
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # Estimate reclaimed bytes from dtype (ONNX TensorProto codes:
                # 1=FLOAT, 6=INT32 -> 4 bytes; 7=INT64, 11=DOUBLE -> 8 bytes).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, new_model_name)
    onnx.save(model, new_model_path)
    return new_model_path
53
1
"""Tokenizer tests wiring the common tokenizer test suite to LayoutLM."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
# NOTE(review): `__lowerCamelCase` is not defined anywhere in this module, so
# class creation fails with NameError. Given the otherwise-unused
# TokenizerTesterMixin import above, it is presumably the intended base class
# — confirm and fix. Similarly, the method and variable names below look
# machine-mangled (`_lowerCamelCase`, `__UpperCamelCase`): several assignments
# bind `__UpperCamelCase` while later statements read `vocab_tokens`,
# `self.vocab_file`, `input_text`, `output_text`, `tokenizer` and `__A`,
# which are never bound here. Left byte-identical pending upstream repair.
class snake_case ( __lowerCamelCase , unittest.TestCase ):
    """LayoutLM slow/fast tokenizer test configuration for the common suite."""

    # Tokenizer classes under test and feature flags (read by the mixin).
    SCREAMING_SNAKE_CASE_ : List[str] =LayoutLMTokenizer
    SCREAMING_SNAKE_CASE_ : Any =LayoutLMTokenizerFast
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =True
    SCREAMING_SNAKE_CASE_ : Optional[int] =True

    # All five methods below share one (mangled) name, so later defs shadow
    # earlier ones on the class — presumably setUp / get_tokenizer /
    # get_input_output_texts / test_full_tokenizer / a skipped test upstream.
    def _lowerCamelCase ( self : Dict ):
        """Write a tiny WordPiece vocabulary into the test tmp dir."""
        super().setUp()
        __UpperCamelCase = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def _lowerCamelCase ( self : Union[str, Any] , **__A : Dict ):
        """Build a slow tokenizer from the vocabulary written in setUp."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__A )

    def _lowerCamelCase ( self : Dict , __A : Optional[Any] ):
        """Provide an (input, expected output) text pair for the common tests."""
        __UpperCamelCase = 'UNwant\u00E9d,running'
        __UpperCamelCase = 'unwanted, running'
        return input_text, output_text

    def _lowerCamelCase ( self : Optional[Any] ):
        """Tokenize a sample string and check tokens and their vocabulary ids."""
        __UpperCamelCase = self.tokenizer_class(self.vocab_file )
        __UpperCamelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(__A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [7, 4, 5, 1_0, 8, 9] )

    def _lowerCamelCase ( self : List[Any] ):
        # Intentionally empty placeholder (skipped test in the upstream suite).
        pass
53
"""Quickselect: find the k-th smallest element in expected linear time."""
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``.

    Returns (less, equal, greater) lists; the three-way split makes the
    algorithm robust to duplicate values.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (0-based).

    Returns None when ``index`` is out of range. ``items`` is not modified.
    Expected O(n) time with a uniformly random pivot.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
53
1
"""Public re-exports for this utils package."""
# flake8: noqa
# Lint as: python3

# Names exported on `from ... import *`. (Restored to `__all__`; it had been
# mangled into an unused throwaway variable, which made the export list inert.)
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
53
"""Convert a fairseq mBART checkpoint into a Transformers MBart model."""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only entries from ``state_dict`` in place (trailing ``_``
    signals in-place mutation)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight tensor
    (used to tie the LM head to the shared embeddings)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq ``model.pt`` and return the equivalent MBart model.

    Args:
        checkpoint_path: path of the fairseq checkpoint on disk.
        hf_config_path: hub id or path of the MBart config to instantiate.
        finetuned: whether the checkpoint is a fine-tuned model (ties LM head).
        mbart_50: whether the checkpoint is an mBART-50 model (uses ReLU).
            (Renamed from the mangled ``mbart_aa`` to match the ``--mbart_50``
            CLI flag below, which the previous code read via the non-existent
            ``args.mbart_aa`` attribute.)
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    # Infer vocabulary size from the checkpoint rather than the config.
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    # fairseq stores separate embedding copies; Transformers expects `shared`.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()

    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
53
1
"""Introsort: quicksort with median-of-three pivots that falls back to
heapsort past a recursion-depth limit, and to insertion sort on small runs."""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort; return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot to the right until the value fits.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted there is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """In-place heapsort; used as the deep-recursion fallback."""
    n = len(array)
    # Build the max-heap bottom-up, then repeatedly extract the maximum.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_a(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Median of the three sampled values (median-of-three pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split point: all elements before it are <= pivot-side values.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """Sort ``array`` in place with introsort and return it.

    The recursion-depth budget is 2*ceil(log2(n)), after which the current
    range is finished with heapsort; ranges of <= 16 elements use insertion
    sort.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort worker over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
53
"""Compute validation loss and perplexity of a causal LM on a streamed dataset."""
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that yields fixed-length token tensors.

    Text examples are buffered until roughly ``seq_length * chars_per_token *
    num_of_sequences`` characters are collected, tokenized in one batch,
    concatenated with BOS separators, and sliced into ``seq_length`` chunks.
    The trailing partial chunk of each buffer is dropped.
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # BOS token separates concatenated examples.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Character budget per buffer; chars_per_token is an empirical
        # characters-per-token estimate.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                # Drop the final partial chunk so every sample is seq_length long.
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Stream the dataset named in ``args`` and wrap it in a batched DataLoader."""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Return (mean loss, perplexity) of the global ``model`` on ``eval_dataloader``."""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            # Causal LM: the inputs double as labels (shifted internally).
            outputs = model(batch, labels=batch)
        # Repeat per-sample so gather() weights every device's batch equally.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
53
1
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() a__ : List[Any] =logging.get_logger(__name__) def lowercase__ ( __lowercase : Dict ) -> Optional[int]: """simple docstring""" print('Loading config file...' ) def flatten_yaml_as_dict(__lowercase : List[Any] , __lowercase : int="" , __lowercase : Optional[int]="." ): __UpperCamelCase = [] for k, v in d.items(): __UpperCamelCase = parent_key + sep + k if parent_key else k if isinstance(__lowercase , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__lowercase , __lowercase , sep=__lowercase ).items() ) else: items.append((new_key, v) ) return dict(__lowercase ) __UpperCamelCase = argparse.Namespace() with open(__lowercase , 'r' ) as yaml_file: try: __UpperCamelCase = yaml.load(__lowercase , Loader=yaml.FullLoader ) __UpperCamelCase = flatten_yaml_as_dict(__lowercase ) for k, v in flat_cfg.items(): setattr(__lowercase , __lowercase , __lowercase ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. 
Error message: {}'.format(__lowercase , str(__lowercase ) ) ) return config def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Optional[int]: """simple docstring""" __UpperCamelCase = MobileViTVaConfig() __UpperCamelCase = False # dataset if task_name.startswith('imagenet1k_' ): __UpperCamelCase = 1000 if int(task_name.strip().split('_' )[-1] ) == 384: __UpperCamelCase = 384 else: __UpperCamelCase = 256 __UpperCamelCase = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): __UpperCamelCase = 21000 if int(task_name.strip().split('_' )[-1] ) == 384: __UpperCamelCase = 384 else: __UpperCamelCase = 256 __UpperCamelCase = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): __UpperCamelCase = 151 __UpperCamelCase = 512 __UpperCamelCase = 'ade20k-id2label.json' __UpperCamelCase = True elif task_name.startswith('voc_' ): __UpperCamelCase = 21 __UpperCamelCase = 512 __UpperCamelCase = 'pascal-voc-id2label.json' __UpperCamelCase = True # orig_config __UpperCamelCase = load_orig_config_file(__lowercase ) assert getattr(__lowercase , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" __UpperCamelCase = getattr(__lowercase , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(__lowercase , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" __UpperCamelCase = getattr(__lowercase , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: __UpperCamelCase = getattr(__lowercase , 'model.segmentation.output_stride' , 16 ) if "_deeplabv3" in task_name: __UpperCamelCase = getattr(__lowercase , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] ) __UpperCamelCase = getattr(__lowercase , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 ) __UpperCamelCase = getattr(__lowercase , 
'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label __UpperCamelCase = 'huggingface/label-files' __UpperCamelCase = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) ) __UpperCamelCase = {int(__lowercase ): v for k, v in idalabel.items()} __UpperCamelCase = idalabel __UpperCamelCase = {v: k for k, v in idalabel.items()} return config def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any: """simple docstring""" __UpperCamelCase = dct.pop(__lowercase ) __UpperCamelCase = val def lowercase__ ( __lowercase : Optional[int] , __lowercase : str=False ) -> Union[str, Any]: """simple docstring""" if base_model: __UpperCamelCase = '' else: __UpperCamelCase = 'mobilevitv2.' __UpperCamelCase = [] for k in state_dict.keys(): if k[:8] == "encoder.": __UpperCamelCase = k[8:] else: __UpperCamelCase = k if ".block." in k: __UpperCamelCase = k_new.replace('.block.' , '.' ) if ".conv." in k: __UpperCamelCase = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: __UpperCamelCase = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: __UpperCamelCase = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if F'''layer_{i}.''' in k: __UpperCamelCase = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: __UpperCamelCase = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: __UpperCamelCase = k_new.replace('.red_1x1.' , '.reduce_1x1.' 
) for i in [3, 4, 5]: if F'''layer_{i}.0.''' in k: __UpperCamelCase = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if F'''layer_{i}.1.local_rep.0.''' in k: __UpperCamelCase = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if F'''layer_{i}.1.local_rep.1.''' in k: __UpperCamelCase = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: __UpperCamelCase = [0, 1] elif i == 4: __UpperCamelCase = [0, 1, 2, 3] elif i == 5: __UpperCamelCase = [0, 1, 2] for j in j_in: if F'''layer_{i}.1.global_rep.{j}.''' in k: __UpperCamelCase = k_new.replace( F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if F'''layer_{i}.1.global_rep.{j+1}.''' in k: __UpperCamelCase = k_new.replace( F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if F'''layer_{i}.1.conv_proj.''' in k: __UpperCamelCase = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: __UpperCamelCase = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: __UpperCamelCase = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: __UpperCamelCase = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: __UpperCamelCase = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: __UpperCamelCase = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: __UpperCamelCase = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: __UpperCamelCase = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: __UpperCamelCase = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: __UpperCamelCase = k_new.replace('.aspp_pool.' , '.' 
) rename_keys.append((k, k_new) ) return rename_keys def lowercase__ ( __lowercase : Dict ) -> str: """simple docstring""" __UpperCamelCase = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(__lowercase ) for k in keys_to_ignore: state_dict.pop(__lowercase , __lowercase ) def lowercase__ ( ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" __UpperCamelCase = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def lowercase__ ( __lowercase : Any , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : int ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = get_mobilevitva_config(__lowercase , __lowercase ) # load original state_dict __UpperCamelCase = torch.load(__lowercase , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): __UpperCamelCase = MobileViTVaForSemanticSegmentation(__lowercase ).eval() __UpperCamelCase = False else: __UpperCamelCase = MobileViTVaForImageClassification(__lowercase ).eval() __UpperCamelCase = False # remove and rename some keys of load the original model __UpperCamelCase = checkpoint remove_unused_keys(__lowercase ) __UpperCamelCase = create_rename_keys(__lowercase , base_model=__lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) # load modified state_dict model.load_state_dict(__lowercase ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCamelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) __UpperCamelCase = image_processor(images=prepare_img() , return_tensors='pt' ) __UpperCamelCase = model(**__lowercase ) # verify classification model if 
task_name.startswith('imagenet' ): __UpperCamelCase = outputs.logits __UpperCamelCase = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant __UpperCamelCase = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ) assert torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowercase ) if __name__ == "__main__": a__ : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . ''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model 
directory.''' ) a__ : Union[str, Any] =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
53
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a__ : Any =logging.get_logger(__name__) a__ : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo" SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"] SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ): __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = hidden_size __UpperCamelCase = num_layers __UpperCamelCase = num_heads __UpperCamelCase = intermediate_size __UpperCamelCase = window_size __UpperCamelCase = activation_function __UpperCamelCase = resid_dropout __UpperCamelCase = embed_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = classifier_dropout __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id __UpperCamelCase = attention_types __UpperCamelCase = self.expand_attention_types_params(__A ) if 
len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' ) super().__init__(bos_token_id=__A , eos_token_id=__A , **__A ) @staticmethod def _lowerCamelCase ( __A : Tuple ): __UpperCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any: """simple docstring""" import torch __UpperCamelCase = input.size() __UpperCamelCase = len(__lowercase ) __UpperCamelCase = shape[dimension] __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase ) __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1 __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None] __UpperCamelCase = [slice(__lowercase )] * rank __UpperCamelCase = indices __UpperCamelCase = input[s] __UpperCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" import torch __UpperCamelCase = torch.arange(1 , __lowercase ) __UpperCamelCase = torch.remainder(__lowercase , __lowercase ) __UpperCamelCase = remainders == 0 __UpperCamelCase = candidates[divisor_indices] __UpperCamelCase = torch.max(__lowercase ) return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' ) class snake_case ( __lowerCamelCase ): """simple docstring""" @property def _lowerCamelCase ( self : Tuple ): 
__UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def _lowerCamelCase ( self : int ): return self._config.num_heads def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ): __UpperCamelCase = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs['attention_mask'] if self.use_past: __UpperCamelCase = ordered_inputs['attention_mask'].dtype __UpperCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def _lowerCamelCase ( self : Dict ): return 1_3
53
1
"""Automatic differentiation with dual numbers.

A ``Dual`` stores a real part plus a list of dual (epsilon) coefficients;
arithmetic on ``Dual`` values propagates Taylor coefficients, which
``differentiate`` uses to compute the n-th derivative of a function at a
point. This restores the identifiers the dataset mangling destroyed: the
class is referenced as ``Dual`` and the function as ``differentiate`` inside
the file itself, and ``__init__`` had two parameters both named ``__A``
(a SyntaxError) whose bodies reference ``real`` and ``rank``.
"""
from math import factorial


class Dual:
    """A dual number: ``real + duals[0]*E + duals[1]*E^2 + ...``."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # An int rank means "rank dual coefficients, all initialised to 1".
            self.duals = [1] * rank
        else:
            # Otherwise rank is already the explicit list of dual coefficients.
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Return a copy with trailing zero dual coefficients removed."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            # A scalar only shifts the real part.
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list; missing entries default to 1,
        # matching the ``[1] * rank`` constructor convention.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Polynomial (convolution) product of the two coefficient lists, with
        # the real parts acting as the constant terms.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        # Restored from the corrupted ``isinstance(__A, __A)``: the power must
        # be a non-negative integer.
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the ``order``-th derivative of ``func`` evaluated at ``position``.

    >>> differentiate(lambda x: x**2, 2, 2)
    2
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    result = func(Dual(position, 1))
    if order == 0:
        return result.real
    # duals[order - 1] is the Taylor coefficient; scale by order! to get the
    # derivative.
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
53
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    """Agent tool that answers a question about a document image with a Donut
    (VisionEncoderDecoder) DocVQA checkpoint.

    Restores the identifiers corrupted by the dataset mangling: the class
    attributes were all renamed ``SCREAMING_SNAKE_CASE_`` (so only the last
    assignment survived), ``__init__`` took duplicate ``__A`` parameters
    (a SyntaxError), the ``task_prompt`` binding was lost, and
    ``token2json`` had been garbled into ``tokenajson``.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Donut consumes PIL images, so Pillow is a hard requirement.
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build decoder prompt ids and pixel values for one (document, question) pair."""
        # Donut's DocVQA task prompt wraps the user's question.
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-decode the answer sequence, masking the UNK token."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and parse the Donut output into the answer string."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
53
1
"""Tests for ``datasets.splits.SplitDict`` YAML round-tripping and ``asdict``.

Restores the two distinct test names destroyed by the dataset mangling
(both functions had been renamed ``lowercase__``, so the second definition
silently shadowed the first and neither was pytest-collectable), and the
lost assignment targets inside the bodies.
"""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """A SplitDict survives a round trip through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """``asdict`` keeps the deprecated ``dataset_name`` field for backward compatibility."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
"""Versatile Diffusion pipeline exports.

Re-exports the Versatile Diffusion pipelines when their runtime requirements
(PyTorch and transformers >= 4.25.0) are available; otherwise exposes dummy
placeholder objects that raise an informative error when used.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Versatile Diffusion needs both torch and a sufficiently recent transformers.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects that raise a helpful ImportError on instantiation.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Real implementations, only importable when the optional deps are present.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
1
"""Deduplicate identical initializer tensors in an ONNX model.

Restores the function names destroyed by the dataset mangling: all five
functions had been renamed ``lowercase__`` while the call sites still used
``_is_equal_tensor_proto``, ``_node_replace_input_with``,
``_graph_replace_input_with`` and ``_remove_dup_initializers_from_model``,
so the module raised NameError on use. Lost assignment targets are
reconstructed from the names the bodies reference.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    # Blank the names so proto equality compares only dtype/dims/data,
    # then restore them.
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewrite every input of ``node_proto`` named ``name`` to ``new_name``,
    recursing into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Protobuf repeated fields have no item assignment; insert + pop
            # swaps the entry in place.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply ``_node_replace_input_with`` to every node in the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers (indices ``i``) and rewire their consumers
    to the surviving reference initializer (indices ``ref_i``)."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # Redirect every node input from the removed duplicate to its keeper.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load an ONNX model, merge byte-identical initializers, and save the
    result next to the input as ``optimized_<name>``.

    Returns the path of the optimized model file.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    # O(n^2) pairwise scan; each duplicate is recorded once against the first
    # (lowest-index) occurrence.
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Element sizes by ONNX data_type: 1=FLOAT, 6=INT32 (4 bytes);
                # 7=INT64, 11=DOUBLE (8 bytes).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
53
"""Parquet reader/writer helpers for `datasets` (datasets/io/parquet.py)."""
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return a parquet row-group size suited to the dataset's feature types.

    Binary-heavy features (images, audio, raw bytes) make rows large, so a
    smaller row group keeps random access cheap. Returns None when no
    special-cased feature type is present (caller falls back to its default).
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    """Read parquet file(s) into a `Dataset` (or a streaming `IterableDataset`)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to {split_name: paths}, the shape the packaged builder expects.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style depending on `self.streaming`)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    """Write a `Dataset` to a parquet file path or binary file-like object."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Feature-aware default keeps row groups small for binary-heavy data.
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """Write the dataset and return the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Stream the underlying Arrow table to `file_obj` in `batch_size` slices.

        The caller is responsible for opening and closing the file object.
        """
        written = 0
        # A stray "path_or_buf" kwarg would conflict with the explicit file_obj.
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
53
1
"""Tests for transformers.convert_graph_to_onnx (export, quantization, shape inference)."""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Stub model whose forward args match the generated inputs contiguously."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Stub model with an extra arg ("some_other_args") interleaved in the inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX in a temp dir and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        """ONNX export takes a tuple, not a dict: args interleaved with an
        unprovided parameter (e.g. GPT2's "past") must be dropped."""
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
53
"""Tests for SplitDict YAML round-tripping and its asdict representation."""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """Round-trip a SplitDict through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict) == len(split_dict_yaml_list)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """`asdict` must keep the (deprecated) dataset_name field in the dump."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
1
"""Fast (tokenizers-backed) tokenizer for RetriBERT — WordPiece, BERT-style."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RetriBERT tokenizer. Identical to BertTokenizerFast: WordPiece with
    BERT-style special tokens and sequence-pair handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized tokenizer.json
        # disagrees with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type IDs: 0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary files via the backend model; return the file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
53
"""Lazy-import shim for the bigbird_pegasus model package."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule -> public names, consumed by _LazyModule.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

# Modeling code is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
53
1
"""Compute Proth numbers: integers of the form k * 2^m + 1 with odd k < 2^m."""
import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed).

    >>> proth(1)
    3
    >>> proth(3)
    9
    >>> proth(6)
    25

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling "blocks" needed for the list to reach index `number`:
        # block b contributes 3 * 2^(b-1) new numbers.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        # Count of numbers generated in the current block; doubles each block.
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
53
"""Feature extractor for Speech2Text: Kaldi-compatible log-mel fbank features
with optional utterance-level cepstral mean/variance normalization (CMVN)."""
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Extracts fbank features from raw mono speech and pads them into batches."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute Kaldi-style fbank features for a single 1-D waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Normalize one utterance using statistics of its non-padded frames only."""
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        # Restore padding frames to the padding value after normalization.
        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply utterance CMVN to each feature matrix, ignoring padded frames."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize and pad one utterance or a batch of utterances."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
53
1
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def lowercase__ ( __lowercase : Optional[Any] ) -> List[Any]: """simple docstring""" __UpperCamelCase = torch.exp(__lowercase ) __UpperCamelCase = torch.sum(__lowercase , dim=1 ) # sum of exp(x_i) __UpperCamelCase = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(__lowercase ) - B / A class snake_case ( nn.Module ): """simple docstring""" def __init__( self : Any , __A : Optional[int] ): super().__init__() __UpperCamelCase = config.output_attentions __UpperCamelCase = config.output_hidden_states __UpperCamelCase = nn.ModuleList([BertLayer(__A ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase = nn.ModuleList([BertHighway(__A ) for _ in range(config.num_hidden_layers )] ) __UpperCamelCase = [-1 for _ in range(config.num_hidden_layers )] def _lowerCamelCase ( self : Optional[Any] , __A : List[Any] ): if (type(__A ) is float) or (type(__A ) is int): for i in range(len(self.early_exit_entropy ) ): __UpperCamelCase = x else: __UpperCamelCase = x def _lowerCamelCase ( self : str , __A : Tuple ): __UpperCamelCase = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def _lowerCamelCase ( self : List[str] , __A : List[str] , __A : Optional[Any]=None , __A : Optional[Any]=None , __A : Union[str, Any]=None , __A : str=None , ): __UpperCamelCase = () __UpperCamelCase = () __UpperCamelCase = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: __UpperCamelCase = all_hidden_states + (hidden_states,) __UpperCamelCase = layer_module( __A , __A , head_mask[i] , __A , __A ) 
__UpperCamelCase = layer_outputs[0] if self.output_attentions: __UpperCamelCase = all_attentions + (layer_outputs[1],) __UpperCamelCase = (hidden_states,) if self.output_hidden_states: __UpperCamelCase = current_outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase = current_outputs + (all_attentions,) __UpperCamelCase = self.highway[i](__A ) # logits, pooled_output if not self.training: __UpperCamelCase = highway_exit[0] __UpperCamelCase = entropy(__A ) __UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy __UpperCamelCase = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: __UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(__A , i + 1 ) else: __UpperCamelCase = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: __UpperCamelCase = all_hidden_states + (hidden_states,) __UpperCamelCase = (hidden_states,) if self.output_hidden_states: __UpperCamelCase = outputs + (all_hidden_states,) if self.output_attentions: __UpperCamelCase = outputs + (all_attentions,) __UpperCamelCase = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
" , __lowerCamelCase , ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : int , __A : int ): super().__init__(__A ) __UpperCamelCase = config __UpperCamelCase = BertEmbeddings(__A ) __UpperCamelCase = DeeBertEncoder(__A ) __UpperCamelCase = BertPooler(__A ) self.init_weights() def _lowerCamelCase ( self : Union[str, Any] ): self.encoder.init_highway_pooler(self.pooler ) def _lowerCamelCase ( self : List[str] ): return self.embeddings.word_embeddings def _lowerCamelCase ( self : Optional[Any] , __A : Union[str, Any] ): __UpperCamelCase = value def _lowerCamelCase ( self : Dict , __A : List[Any] ): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(__A ) @add_start_docstrings_to_model_forward(__A ) def _lowerCamelCase ( self : List[Any] , __A : Any=None , __A : Any=None , __A : Union[str, Any]=None , __A : Any=None , __A : Optional[int]=None , __A : Tuple=None , __A : Any=None , __A : List[str]=None , ): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: __UpperCamelCase = input_ids.size() elif inputs_embeds is not None: __UpperCamelCase = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) __UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __UpperCamelCase = torch.ones(__A , device=__A ) if encoder_attention_mask is None: __UpperCamelCase = torch.ones(__A , device=__A ) if token_type_ids is None: __UpperCamelCase = torch.zeros(__A , dtype=torch.long , device=__A ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__UpperCamelCase = self.get_extended_attention_mask(__A , __A , __A ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: __UpperCamelCase = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: __UpperCamelCase = encoder_attention_mask[:, None, None, :] __UpperCamelCase = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility __UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -1_0000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __UpperCamelCase = self.get_head_mask(__A , self.config.num_hidden_layers ) __UpperCamelCase = self.embeddings( input_ids=__A , position_ids=__A , token_type_ids=__A , inputs_embeds=__A ) __UpperCamelCase = self.encoder( __A , attention_mask=__A , head_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , ) __UpperCamelCase = encoder_outputs[0] __UpperCamelCase = self.pooler(__A ) __UpperCamelCase = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : Optional[Any] , __A : Optional[Any] , __A : Optional[Any] ): __UpperCamelCase = message __UpperCamelCase = exit_layer # start from 1! 
# NOTE(review): obfuscated code (assignment targets collapsed to
# `__UpperCamelCase`, parameters to `__A`, several duplicated → SyntaxError as
# written). First class appears to be a DeeBERT "highway" early-exit head
# (pooler + dropout + linear classifier); second is the sequence-classification
# model that catches HighwayException to support early exiting.
class snake_case ( nn.Module ):
    """simple docstring"""

    def __init__( self : str , __A : List[str] ):
        super().__init__()
        # NOTE(review): `config` is not in scope here (the parameter is `__A`).
        __UpperCamelCase = BertPooler(__A )
        __UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
        __UpperCamelCase = nn.Linear(config.hidden_size , config.num_labels )

    def _lowerCamelCase ( self : Dict , __A : Optional[int] ):
        # Pooler
        __UpperCamelCase = encoder_outputs[0]
        __UpperCamelCase = self.pooler(__A )
        # "return" pooler_output
        # BertModel
        __UpperCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        __UpperCamelCase = bmodel_output[1]
        __UpperCamelCase = self.dropout(__A )
        __UpperCamelCase = self.classifier(__A )
        return logits, pooled_output


@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , __lowerCamelCase , )
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    def __init__( self : List[str] , __A : Any ):
        super().__init__(__A )
        # NOTE(review): `config` is not in scope here (the parameter is `__A`).
        __UpperCamelCase = config.num_labels
        __UpperCamelCase = config.num_hidden_layers
        __UpperCamelCase = DeeBertModel(__A )
        __UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
        __UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()

    # Forward: run the backbone; a HighwayException means an early exit fired
    # and its payload replaces the full-model outputs. Losses are computed for
    # the final classifier and for every highway exit.
    # NOTE(review): all nine parameters are named `__A` (duplicate argument
    # names — SyntaxError as written); body references (`labels`, `outputs`,
    # `train_highway`, `output_layer`, ...) were the original parameter names.
    @add_start_docstrings_to_model_forward(__A )
    def _lowerCamelCase ( self : Optional[int] , __A : Optional[int]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : List[str]=None , __A : int=None , __A : Optional[Any]=None , __A : Any=None , __A : str=-1 , __A : int=False , ):
        __UpperCamelCase = self.num_layers
        try:
            __UpperCamelCase = self.bert(
                __A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            __UpperCamelCase = outputs[1]
            __UpperCamelCase = self.dropout(__A )
            __UpperCamelCase = self.classifier(__A )
            __UpperCamelCase = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early-exit highway fired: recover its outputs and exit layer.
            __UpperCamelCase = e.message
            __UpperCamelCase = e.exit_layer
            __UpperCamelCase = outputs[0]
        if not self.training:
            __UpperCamelCase = entropy(__A )
            __UpperCamelCase = []
            __UpperCamelCase = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                __UpperCamelCase = MSELoss()
                __UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                __UpperCamelCase = CrossEntropyLoss()
                __UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            __UpperCamelCase = []
            for highway_exit in outputs[-1]:
                __UpperCamelCase = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__A )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    __UpperCamelCase = MSELoss()
                    __UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    __UpperCamelCase = CrossEntropyLoss()
                    __UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(__A )
            if train_highway:
                __UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                __UpperCamelCase = (loss,) + outputs
        if not self.training:
            __UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                __UpperCamelCase = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
53
'''simple docstring'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a__ : List[Any] =logging.get_logger(__name__)

a__ : List[Any] ={
    '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


# NOTE(review): obfuscated AltCLIP configuration module — assignment targets
# collapsed to `__UpperCamelCase` and parameters to `__A` (duplicated in every
# `__init__` signature, a SyntaxError as written); bodies reference the
# original parameter names (`vocab_size`, `hidden_size`, ...), which are
# undefined here. Comments describe apparent intent only.
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # model_type identifier used by the config registry
    SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model"

    # Text-encoder config: defaults correspond to the AltCLIP text tower
    # (XLM-R-large-like: 250002 vocab, 1024 hidden, 24 layers, 16 heads).
    def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ):
        super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
        __UpperCamelCase = vocab_size
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = hidden_act
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = hidden_dropout_prob
        __UpperCamelCase = attention_probs_dropout_prob
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = type_vocab_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = position_embedding_type
        __UpperCamelCase = use_cache
        __UpperCamelCase = project_dim


class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # model_type identifier used by the config registry
    SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model"

    # Vision-encoder config: CLIP-ViT-like defaults (224px images, 32px patches).
    def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ):
        super().__init__(**__A )
        __UpperCamelCase = hidden_size
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = projection_dim
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = num_channels
        __UpperCamelCase = patch_size
        __UpperCamelCase = image_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = hidden_act

    # from_pretrained-style constructor: loads a config dict and, when it
    # belongs to a composite "altclip" config, extracts its vision sub-config.
    @classmethod
    def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
        cls._set_token_in_kwargs(__A )
        __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            __UpperCamelCase = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(__A , **__A )


# Composite config holding one text and one vision sub-config plus the shared
# projection dimension and logit-scale initialization.
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ : List[str] ="altclip"
    SCREAMING_SNAKE_CASE_ : Optional[int] =True

    def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        __UpperCamelCase = kwargs.pop('text_config_dict' , __A )
        __UpperCamelCase = kwargs.pop('vision_config_dict' , __A )
        super().__init__(**__A )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                __UpperCamelCase = {}
            # This is the complete result when using `text_config_dict`.
            __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.''' )
                    logger.warning(__A )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                __UpperCamelCase = {}
            # This is the complete result when using `vision_config_dict`.
            __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                __UpperCamelCase = {
                    str(__A ): value for key, value in _vision_config_dict['id2label'].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.''' )
                    logger.warning(__A )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            __UpperCamelCase = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
        if vision_config is None:
            __UpperCamelCase = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
        __UpperCamelCase = AltCLIPTextConfig(**__A )
        __UpperCamelCase = AltCLIPVisionConfig(**__A )
        __UpperCamelCase = projection_dim
        __UpperCamelCase = logit_scale_init_value
        __UpperCamelCase = 1.0

    # Alternate constructor from two already-built sub-configs.
    @classmethod
    def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )

    # to_dict: serialize, expanding the nested sub-configs to plain dicts.
    def _lowerCamelCase ( self : List[Any] ):
        __UpperCamelCase = copy.deepcopy(self.__dict__ )
        __UpperCamelCase = self.text_config.to_dict()
        __UpperCamelCase = self.vision_config.to_dict()
        __UpperCamelCase = self.__class__.model_type
        return output
53
1
'''simple docstring'''
from __future__ import annotations

import inspect
import unittest
from math import floor

import numpy as np

from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# NOTE(review): obfuscated TF CvT test module — assignment targets collapsed
# to `__UpperCamelCase` and parameters to `__A` (duplicated in several
# signatures, a SyntaxError as written). Comments describe apparent intent.
# Config tester: checks CvT-specific attributes exist on the config.
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    def _lowerCamelCase ( self : int ):
        __UpperCamelCase = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(__A , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(__A , 'num_heads' ) )


# Model tester: builds small CvT configs/inputs and checks model outputs.
class snake_case :
    """simple docstring"""

    # NOTE(review): all 18 parameters are named `__A` (SyntaxError as written);
    # body references the original parameter names, which are undefined here.
    def __init__( self : str , __A : Optional[int] , __A : Dict=1_3 , __A : Optional[Any]=6_4 , __A : Tuple=3 , __A : str=[1_6, 4_8, 9_6] , __A : List[Any]=[1, 3, 6] , __A : Tuple=[1, 2, 1_0] , __A : str=[7, 3, 3] , __A : Optional[int]=[4, 2, 2] , __A : Any=[2, 1, 1] , __A : List[str]=[2, 2, 2] , __A : List[Any]=[False, False, True] , __A : Dict=[0.0, 0.0, 0.0] , __A : Dict=0.02 , __A : int=1e-12 , __A : int=True , __A : List[Any]=True , __A : List[Any]=2 , ):
        __UpperCamelCase = parent
        __UpperCamelCase = batch_size
        __UpperCamelCase = image_size
        __UpperCamelCase = patch_sizes
        __UpperCamelCase = patch_stride
        __UpperCamelCase = patch_padding
        __UpperCamelCase = is_training
        __UpperCamelCase = use_labels
        __UpperCamelCase = num_labels
        __UpperCamelCase = num_channels
        __UpperCamelCase = embed_dim
        __UpperCamelCase = num_heads
        __UpperCamelCase = stride_kv
        __UpperCamelCase = depth
        __UpperCamelCase = cls_token
        __UpperCamelCase = attention_drop_rate
        __UpperCamelCase = initializer_range
        __UpperCamelCase = layer_norm_eps

    def _lowerCamelCase ( self : Any ):
        # Random pixel values (and optional labels) for one test batch.
        __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCamelCase = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            __UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
        __UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def _lowerCamelCase ( self : str ):
        # Build a CvtConfig from the tester's attributes.
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def _lowerCamelCase ( self : Any , __A : List[str] , __A : List[str] , __A : Optional[Any] ):
        # Base model: output spatial size follows the conv-patch formula
        # floor((dim + 2*pad - kernel) / stride) + 1, applied per stage.
        __UpperCamelCase = TFCvtModel(config=__A )
        __UpperCamelCase = model(__A , training=__A )
        __UpperCamelCase = (self.image_size, self.image_size)
        __UpperCamelCase , __UpperCamelCase = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            __UpperCamelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            __UpperCamelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def _lowerCamelCase ( self : Optional[Any] , __A : Any , __A : Optional[int] , __A : Optional[Any] ):
        # Classification head: logits shape is (batch, num_labels).
        __UpperCamelCase = self.num_labels
        __UpperCamelCase = TFCvtForImageClassification(__A )
        __UpperCamelCase = model(__A , labels=__A , training=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowerCamelCase ( self : Optional[Any] ):
        __UpperCamelCase = self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
        __UpperCamelCase = {'pixel_values': pixel_values}
        return config, inputs_dict


# Main test suite combining the common model-tester mixins.
@require_tf
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ : Optional[int] =(TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    SCREAMING_SNAKE_CASE_ : int =(
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Optional[Any] =False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =False
    SCREAMING_SNAKE_CASE_ : Optional[int] =False
    SCREAMING_SNAKE_CASE_ : Any =False
    SCREAMING_SNAKE_CASE_ : int =False

    def _lowerCamelCase ( self : List[Any] ):
        __UpperCamelCase = TFCvtModelTester(self )
        __UpperCamelCase = TFCvtConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )

    def _lowerCamelCase ( self : Any ):
        # Run the full common config-test battery.
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='Cvt does not output attentions' )
    def _lowerCamelCase ( self : Tuple ):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds' )
    def _lowerCamelCase ( self : List[str] ):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings' )
    def _lowerCamelCase ( self : int ):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
    def _lowerCamelCase ( self : Dict ):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
    @slow
    def _lowerCamelCase ( self : List[Any] ):
        super().test_keras_fit()

    @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
    def _lowerCamelCase ( self : Union[str, Any] ):
        # Run keras-fit under mixed precision, then restore the global policy.
        __UpperCamelCase = tf.keras.mixed_precision.Policy('mixed_float16' )
        tf.keras.mixed_precision.set_global_policy(__A )
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('float32' )

    def _lowerCamelCase ( self : Optional[int] ):
        # The first positional argument of every model's `call` must be
        # `pixel_values`.
        __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase = model_class(__A )
            __UpperCamelCase = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase = [*signature.parameters.keys()]
            __UpperCamelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __A )

    def _lowerCamelCase ( self : Union[str, Any] ):
        def check_hidden_states_output(__A : List[Any] , __A : List[Any] , __A : Dict ):
            __UpperCamelCase = model_class(__A )
            __UpperCamelCase = model(**self._prepare_for_class(__A , __A ) )
            __UpperCamelCase = outputs.hidden_states
            __UpperCamelCase = len(self.model_tester.depth )
            self.assertEqual(len(__A ) , __A )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase = True
            check_hidden_states_output(__A , __A , __A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCamelCase = True
            check_hidden_states_output(__A , __A , __A )

    def _lowerCamelCase ( self : Any ):
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )

    def _lowerCamelCase ( self : str ):
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__A )

    @slow
    def _lowerCamelCase ( self : Dict ):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase = TFCvtModel.from_pretrained(__A )
            self.assertIsNotNone(__A )


# Load the standard COCO test fixture image.
def lowercase__ ( ) -> str:
    """simple docstring"""
    __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# Slow integration test: full pretrained model against known logits.
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def _lowerCamelCase ( self : Dict ):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def _lowerCamelCase ( self : Dict ):
        __UpperCamelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        __UpperCamelCase = self.default_image_processor
        __UpperCamelCase = prepare_img()
        __UpperCamelCase = image_processor(images=__A , return_tensors='tf' )
        # forward pass
        __UpperCamelCase = model(**__A )
        # verify the logits
        __UpperCamelCase = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __A )
        __UpperCamelCase = tf.constant([0.9285, 0.9015, -0.3150] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __A , atol=1e-4 ) )
53
'''simple docstring'''
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


# NOTE(review): obfuscated LUKE checkpoint-conversion script — assignment
# targets collapsed to `__UpperCamelCase`, parameters to `__lowercase`
# (duplicated → SyntaxError as written), and both functions renamed to
# `lowercase__` while the __main__ block still calls `convert_luke_checkpoint`
# (undefined). `Optional`/`Union`/`List` annotations are used without a typing
# import — presumably a lost import. Additionally, the entity-shape check
# below uses `!=` where the parallel word-level check uses `==` — looks
# inverted; confirm against the upstream conversion script.
@torch.no_grad()
def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Any ) -> Optional[Any]:
    """simple docstring"""
    # Load metadata, build the config, and read the raw checkpoint.
    with open(__lowercase ) as metadata_file:
        __UpperCamelCase = json.load(__lowercase )
    __UpperCamelCase = LukeConfig(use_entity_aware_attention=__lowercase , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    __UpperCamelCase = torch.load(__lowercase , map_location='cpu' )
    # Load the entity vocab file
    __UpperCamelCase = load_entity_vocab(__lowercase )
    __UpperCamelCase = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    __UpperCamelCase = AddedToken('<ent>' , lstrip=__lowercase , rstrip=__lowercase )
    __UpperCamelCase = AddedToken('<ent2>' , lstrip=__lowercase , rstrip=__lowercase )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(__lowercase )
    with open(os.path.join(__lowercase , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(__lowercase , __lowercase )
    __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase )
    # Initialize the embeddings of the special tokens
    __UpperCamelCase = state_dict['embeddings.word_embeddings.weight']
    __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
    __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
    __UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            __UpperCamelCase = f'''encoder.layer.{layer_index}.attention.self.'''
            __UpperCamelCase = state_dict[prefix + matrix_name]
            __UpperCamelCase = state_dict[prefix + matrix_name]
            __UpperCamelCase = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    __UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight']
    __UpperCamelCase = entity_emb[entity_vocab['[MASK]']]
    # Load the converted weights; only embeddings.position_ids may be missing,
    # and only pretraining-head keys may be unexpected.
    __UpperCamelCase = LukeModel(config=__lowercase ).eval()
    __UpperCamelCase , __UpperCamelCase = model.load_state_dict(__lowercase , strict=__lowercase )
    if not (len(__lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'''Missing keys {', '.join(__lowercase )}. Expected only missing embeddings.position_ids''' )
    if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
        raise ValueError(
            'Unexpected keys'
            f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
    # Check outputs
    __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase , task='entity_classification' )
    __UpperCamelCase = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .' )
    __UpperCamelCase = (39, 42)
    __UpperCamelCase = tokenizer(__lowercase , entity_spans=[span] , add_prefix_space=__lowercase , return_tensors='pt' )
    __UpperCamelCase = model(**__lowercase )
    # Verify word hidden states
    if model_size == "large":
        __UpperCamelCase = torch.Size((1, 42, 1024) )
        __UpperCamelCase = torch.tensor(
            [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
    else:  # base
        __UpperCamelCase = torch.Size((1, 42, 768) )
        __UpperCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        __UpperCamelCase = torch.Size((1, 1, 1024) )
        __UpperCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
    else:  # base
        __UpperCamelCase = torch.Size((1, 1, 768) )
        __UpperCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    # NOTE(review): `!=` here appears inverted relative to the word-level check
    # above (`==`): as written this raises exactly when the shape MATCHES.
    if not (outputs.entity_last_hidden_state.shape != expected_shape):
        raise ValueError(
            f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            f''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(__lowercase ) )
    model.save_pretrained(__lowercase )


# Parse a TSV entity-vocab file into {entity_name: row_index}.
# NOTE(review): also named `lowercase__` (shadows the function above at module
# level); `entity_vocab` in the return is undefined here (obfuscation damage).
def lowercase__ ( __lowercase : Dict ) -> List[str]:
    """simple docstring"""
    __UpperCamelCase = {}
    with open(__lowercase , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(__lowercase ):
            __UpperCamelCase , __UpperCamelCase = line.rstrip().split('\t' )
            __UpperCamelCase = index
    return entity_vocab


if __name__ == "__main__":
    a__ : Any =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    a__ : str =parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
53
1
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


# Import guard for the Versatile Diffusion pipelines: they require both
# PyTorch and transformers >= 4.25.0. When either is missing, import dummy
# placeholder objects (which raise a helpful error on use) instead of failing
# at package-import time.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Real implementations, only importable when the dependencies are present.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
'''Unit tests for ``RagRetriever`` with canonical, custom and legacy FAISS indexes.'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch

if is_faiss_available():
    import faiss


@require_faiss
class snake_case ( __lowerCamelCase ):
    """Test suite for RagRetriever.

    NOTE(review): every method below is named ``_lowerCamelCase`` and every local
    assignment targets ``__UpperCamelCase`` — later definitions shadow earlier
    ones, and attribute reads (``self.tmpdirname`` etc.) have no matching writes.
    This looks like mechanical identifier mangling of an upstream test file;
    confirm against the original before relying on any single method.
    """

    def _lowerCamelCase ( self : Any ):
        # setUp: build tiny on-disk DPR (WordPiece) and BART (BPE) tokenizers
        # under a fresh temp dir; retrieval vectors are 8-dimensional.
        __UpperCamelCase = tempfile.mkdtemp()
        __UpperCamelCase = 8
        # DPR tok — minimal WordPiece vocabulary, one token per line.
        __UpperCamelCase = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        __UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' )
        os.makedirs(__A , exist_ok=__A )
        __UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        # BART tok — minimal byte-level BPE vocab + merges ('\u0120' is the
        # GPT-2-style leading-space marker).
        __UpperCamelCase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        __UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) )
        __UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        __UpperCamelCase = {'unk_token': '<unk>'}
        __UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' )
        os.makedirs(__A , exist_ok=__A )
        __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] )
        __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(__A ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(__A ) )

    def _lowerCamelCase ( self : Tuple ):
        # Question-encoder tokenizer built from the temp DPR vocab.
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )

    def _lowerCamelCase ( self : Optional[int] ):
        # Context-encoder tokenizer shares the same DPR vocab file.
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )

    def _lowerCamelCase ( self : Union[str, Any] ):
        # Generator-side BART tokenizer from the temp BPE files.
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )

    def _lowerCamelCase ( self : str ):
        # tearDown: remove the temp dir and everything written into it.
        shutil.rmtree(self.tmpdirname )

    def _lowerCamelCase ( self : Dict ):
        # Two-row dummy dataset; doc 0 embeds to all-ones, doc 1 to 2*ones, so
        # inner-product search has a deterministic ranking.
        __UpperCamelCase = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def _lowerCamelCase ( self : Tuple ):
        # Canonical-index retriever: patch load_dataset so RagRetriever pulls
        # the dummy dataset instead of downloading wiki_dpr.
        __UpperCamelCase = self.get_dummy_dataset()
        __UpperCamelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,
            question_encoder=DPRConfig().to_dict() ,
            generator=BartConfig().to_dict() , )
        with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
            __UpperCamelCase = dataset
            __UpperCamelCase = RagRetriever(
                __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def _lowerCamelCase ( self : Any , __A : bool ):
        # Custom-index retriever; with from_disk=True the index and dataset are
        # serialized to disk first so the retriever reloads them itself.
        __UpperCamelCase = self.get_dummy_dataset()
        __UpperCamelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,
            question_encoder=DPRConfig().to_dict() ,
            generator=BartConfig().to_dict() ,
            index_name='custom' , )
        if from_disk:
            __UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' )
            __UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' )
            dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) )
            dataset.drop_index('embeddings' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) )
            del dataset
            __UpperCamelCase = RagRetriever(
                __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            __UpperCamelCase = RagRetriever(
                __A ,
                question_encoder_tokenizer=self.get_dpr_tokenizer() ,
                generator_tokenizer=self.get_bart_tokenizer() ,
                index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
        return retriever

    def _lowerCamelCase ( self : int ):
        # Legacy-index retriever: embeddings carry one extra dim (legacy DPR
        # format) and index/metadata/passages are pickled to disk.
        __UpperCamelCase = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT )
        __UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' )
        dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' )
        pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) )
        __UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' )
        __UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(__A , open(__A , 'wb' ) )
        __UpperCamelCase = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size ,
            question_encoder=DPRConfig().to_dict() ,
            generator=BartConfig().to_dict() ,
            index_name='legacy' ,
            index_path=self.tmpdirname , )
        __UpperCamelCase = RagRetriever(
            __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    def _lowerCamelCase ( self : List[str] ):
        # retrieve() with the canonical index: +1 query should hit doc 1
        # (2*ones), -1 query should hit doc 0 (ones).
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(__A ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowerCamelCase ( self : Optional[Any] ):
        # save_pretrained / from_pretrained round-trip for the canonical index;
        # load_dataset stays patched because reloading re-fetches the dataset.
        __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
                __UpperCamelCase = self.get_dummy_dataset()
                retriever.save_pretrained(__A )
                __UpperCamelCase = RagRetriever.from_pretrained(__A )
                self.assertIsInstance(__A , __A )
                __UpperCamelCase = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                __UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
                self.assertTrue(out is not None )

    def _lowerCamelCase ( self : Optional[int] ):
        # retrieve() with a custom in-memory index; same ranking expectations
        # as the canonical case.
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(__A ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowerCamelCase ( self : str ):
        # save/load round-trip for the custom-index retriever.
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(__A )
            __UpperCamelCase = RagRetriever.from_pretrained(__A )
            self.assertIsInstance(__A , __A )
            __UpperCamelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            __UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
            self.assertTrue(out is not None )

    def _lowerCamelCase ( self : Optional[Any] ):
        # retrieve() with the custom index loaded from disk (other from_disk
        # variant of the test above).
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(__A ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , __A )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowerCamelCase ( self : Any ):
        # save/load round-trip for the on-disk custom-index retriever.
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(__A )
            __UpperCamelCase = RagRetriever.from_pretrained(__A )
            self.assertIsInstance(__A , __A )
            __UpperCamelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            __UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
            self.assertTrue(out is not None )

    def _lowerCamelCase ( self : Dict ):
        # retrieve() with the legacy index; legacy doc dicts expose only
        # 'text'/'title' (no ids or embeddings).
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_legacy_index_retriever()
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(__A ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['text'] ) , __A )
        self.assertEqual(doc_dicts[0]['text'][0] , 'bar' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['text'][0] , 'foo' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _lowerCamelCase ( self : str ):
        # save/load round-trip for the legacy-index retriever.
        __UpperCamelCase = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(__A )
            __UpperCamelCase = RagRetriever.from_pretrained(__A )
            self.assertIsInstance(__A , __A )
            __UpperCamelCase = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            __UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowerCamelCase ( self : Optional[Any] ):
        # __call__ returns numpy arrays by default and torch tensors when
        # return_tensors='pt' is requested.
        import torch

        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
        __UpperCamelCase = [[5, 7], [1_0, 1_1]]
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(__A , __A )
        self.assertIsInstance(__A , __A )
        self.assertIsInstance(__A , np.ndarray )
        __UpperCamelCase = retriever(
            __A ,
            __A ,
            prefix=retriever.config.generator.prefix ,
            n_docs=__A ,
            return_tensors='pt' , )
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = (  # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(__A , torch.Tensor )
        self.assertIsInstance(__A , torch.Tensor )
        self.assertIsInstance(__A , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _lowerCamelCase ( self : Any ):
        # With a context-encoder tokenizer set, the output dict additionally
        # carries tokenized documents (6 keys total).
        __UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer()
        __UpperCamelCase = 1
        __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
        retriever.set_ctx_encoder_tokenizer(__A )
        __UpperCamelCase = [[5, 7], [1_0, 1_1]]
        __UpperCamelCase = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A )
        self.assertEqual(
            len(__A ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A
        )  # check for doc token related keys in dictionary.
53
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any ="dandelin/vilt-b32-finetuned-vqa" SCREAMING_SNAKE_CASE_ : Optional[int] =( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." 
) SCREAMING_SNAKE_CASE_ : Tuple ="image_qa" SCREAMING_SNAKE_CASE_ : List[str] =AutoProcessor SCREAMING_SNAKE_CASE_ : List[Any] =AutoModelForVisualQuestionAnswering SCREAMING_SNAKE_CASE_ : Optional[Any] =["image", "text"] SCREAMING_SNAKE_CASE_ : Optional[int] =["text"] def __init__( self : List[str] , *__A : str , **__A : List[str] ): requires_backends(self , ['vision'] ) super().__init__(*__A , **__A ) def _lowerCamelCase ( self : List[Any] , __A : "Image" , __A : str ): return self.pre_processor(__A , __A , return_tensors='pt' ) def _lowerCamelCase ( self : Any , __A : Tuple ): with torch.no_grad(): return self.model(**__A ).logits def _lowerCamelCase ( self : Tuple , __A : int ): __UpperCamelCase = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
53
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[Any] ={ '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] =[ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys a__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
'''Convert an original LUKE checkpoint into the transformers format.

Fixes relative to the previous revision:
- The converter was defined with five parameters all named ``__lowercase``
  (a SyntaxError — duplicate argument names are illegal in Python).
- Both functions were named ``lowercase__`` while their call sites use
  ``load_entity_vocab`` and ``convert_luke_checkpoint`` (NameError); the
  functions are restored to the names the call sites expect.
- The entity hidden-state shape check was inverted (``!=`` guarded by
  ``not``), so it raised exactly when the shape was correct.
- The three entity-aware query-matrix copies all assigned to the same lost
  local; they are restored to the ``w2e_``/``e2w_``/``e2e_`` state-dict keys
  the entity-aware attention expects.
'''
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Load an original LUKE checkpoint, adapt it, verify outputs and save it.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` describing the model config.
        entity_vocab_path: path to the TSV entity vocabulary.
        pytorch_dump_folder_path: output directory for model + tokenizer.
        model_size: ``"base"`` or ``"large"`` — selects the expected shapes/slices.

    Raises:
        ValueError: if unexpected state-dict keys remain or the converted
            model's outputs do not match the reference values.
    """
    # Load configuration defined in the metadata file.
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path.
    state_dict = torch.load(checkpoint_path, map_location='cpu')

    # Load the entity vocab file.
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks.
    entity_token_1 = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''')
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from '@' and '#'.
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'])[0]].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (word-to-entity, entity-to-word and entity-to-entity all start from the
    # pretrained word-to-word query).
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks.
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'''Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids''')
    if not (all(key.startswith('entity_predictions') or key.startswith('lm_head') for key in unexpected_keys)):
        raise ValueError(
            'Unexpected keys'
            F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))])}'''
        )

    # Check outputs on a reference sentence with a single entity span.
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')

    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors='pt')

    outputs = model(**encoding)

    # Verify word hidden states.
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}'''
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states.
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # Bug fix: the previous revision used `!=` here, raising when the shape
    # was actually correct.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}'''
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer.
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a TSV entity vocab file and map each entity title to its line index."""
    entity_vocab = {}
    with open(entity_vocab_path, 'r', encoding='utf-8') as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split('\t')
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
    parser.add_argument(
        '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
    )
    parser.add_argument(
        '''--entity_vocab_path''',
        default=None,
        type=str,
        help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
    )
    parser.add_argument(
        '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
53
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any]=False ) -> Tuple: """simple docstring""" try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value a__ : str =parse_flag_from_env('''RUN_SLOW''', default=False) a__ : Union[str, Any] =parse_flag_from_env('''RUN_REMOTE''', default=False) a__ : List[str] =parse_flag_from_env('''RUN_LOCAL''', default=True) a__ : Optional[int] =parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression a__ : Any =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') a__ : Optional[int] =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') a__ : List[str] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio a__ : Any =pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam a__ : Tuple =pytest.mark.skipif( not 
config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility a__ : Union[str, Any] =pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows a__ : int =pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase__ ( __lowercase : Optional[Any] ) -> Optional[int]: """simple docstring""" try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires faiss' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Any: """simple docstring""" try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires regex' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Tuple ) -> List[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires elasticsearch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires sqlalchemy' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[str] ) -> List[str]: """simple docstring""" if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip('test requires PyTorch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[Any] ) -> List[str]: """simple docstring""" if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip('test requires TensorFlow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : int ) -> Union[str, Any]: """simple docstring""" if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip('test requires JAX' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> 
Optional[Any]: """simple docstring""" if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip('test requires Pillow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : int ) -> int: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> int: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> Any: """simple docstring""" def _require_spacy_model(__lowercase : Any ): try: import spacy # noqa F401 spacy.load(__lowercase ) except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(__lowercase ) )(__lowercase ) else: return test_case return _require_spacy_model def lowercase__ ( __lowercase : Union[str, Any] ) -> str: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Optional[Any]: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip('test is slow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = 
unittest.skip('test is local' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip('test is packaged' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Any: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip('test requires remote' )(__lowercase ) return test_case def lowercase__ ( *__lowercase : Optional[Any] ) -> Tuple: """simple docstring""" def decorate(cls : int ): for name, fn in cls.__dict__.items(): if callable(__lowercase ) and name.startswith('test' ): for decorator in decorators: __UpperCamelCase = decorator(__lowercase ) setattr(cls , __lowercase , __lowercase ) return cls return decorate class snake_case ( __lowerCamelCase ): """simple docstring""" pass class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =0 SCREAMING_SNAKE_CASE_ : List[Any] =1 SCREAMING_SNAKE_CASE_ : Union[str, Any] =2 @contextmanager def lowercase__ ( __lowercase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , __lowercase : Dict=1e-16 ) -> List[Any]: """simple docstring""" __UpperCamelCase = requests.Session().request def timeout_request(__lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , **__lowercase : List[str] ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. 
Please set a timeout.''' ) __UpperCamelCase = timeout try: return online_request(__lowercase , __lowercase , **__lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__lowercase : int , __lowercase : List[str] , **__lowercase : Union[str, Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=__lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , __lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , __lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , __lowercase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def lowercase__ ( *__lowercase : Any , **__lowercase : Dict ) -> Dict: """simple docstring""" __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__lowercase , **__lowercase ) as tmp_dir: try: os.chdir(__lowercase ) yield finally: os.chdir(__lowercase ) @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase__ ( __lowercase : List[str] , __lowercase : int ) -> Union[str, Any]: """simple docstring""" return deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__lowercase : List[Any] , *__lowercase : Tuple , **__lowercase : Union[str, Any] ): try: return func(*__lowercase , **__lowercase ) except HTTPError as err: if str(__lowercase ).startswith('500' ) or str(__lowercase ).startswith('502' ): pytest.xfail(str(__lowercase ) ) raise err return decorator.decorator(_wrapper , __lowercase ) class snake_case : """simple docstring""" def __init__( self : int , __A : Any , __A : str , __A : List[Any] ): __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def lowercase__ ( __lowercase : Any , __lowercase : Optional[int] ) -> str: """simple docstring""" while True: __UpperCamelCase = await stream.readline() if line: callback(__lowercase ) else: break async def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : Any=None , __lowercase : Optional[Any]=None , __lowercase : int=False , __lowercase : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(__lowercase ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Tuple="" ): __UpperCamelCase = line.decode('utf-8' ).rstrip() sink.append(__lowercase ) if not quiet: print(__lowercase , __lowercase , file=__lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __lowercase : tee(__lowercase , __lowercase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda __lowercase : tee(__lowercase , __lowercase , sys.stderr , label='stderr:' ) ), ] , timeout=__lowercase , ) return _RunOutput(await p.wait() , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : int=180 , __lowercase : int=False , __lowercase : str=True ) -> _RunOutput: """simple docstring""" __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) ) __UpperCamelCase = ' '.join(__lowercase ) if result.returncode > 0: __UpperCamelCase = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCamelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __UpperCamelCase = re.sub(R'^gw' , '' , __lowercase , 0 , re.M ) return int(__lowercase ) def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = 
29500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
53
1
"""Find articulation points (cut vertices) of an undirected graph via DFS.

Note: the original obfuscated version bound the function to ``lowercase__``
and renamed every dfs parameter to the same identifier while the body still
read ``root``/``at``/``parent``/``out_edge_count`` — it could not run.
"""


def compute_ap(graph):  # noqa: E741
    """Print and return the articulation points of ``graph``.

    Args:
        graph: adjacency list mapping vertex -> list of neighbours; vertices
            are assumed to be the integers ``0..len(graph)-1``.

    Returns:
        Sorted list of articulation-point vertices (also printed one per line,
        preserving the original script's output).
    """
    n = len(graph)
    low = [0] * n          # low-link value per vertex
    visited = [False] * n
    is_art = [False] * n   # articulation-point flags

    def dfs(root, at, parent, out_edge_count):
        # Count tree edges leaving the DFS root; a root with >1 is an AP.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            # Root rule overrides the cycle heuristic for the DFS root.
            is_art[i] = out_edge_count > 1

    points = [x for x in range(n) if is_art[x]]
    for x in points:
        print(x)
    return points


# Backward-compat alias for the obfuscated original's binding.
lowercase__ = compute_ap

# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
a__ = data  # backward-compat alias for the obfuscated original's binding

compute_ap(data)
53
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys

# NOTE(review): the obfuscated original assigned the bare string '3' to a
# throwaway name, leaving `os` unused. Upstream, this line silences
# TensorFlow's C++ logging (3 = errors only) before any TF import — restoring
# that is presumably the intent; confirm against the upstream script.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
a__ = "3"  # kept for backward compatibility with the original module attribute

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
53
1
"""Divide-and-conquer maximum-subarray (CLRS) plus a small benchmark harness.

The obfuscated original bound every intermediate to ``__UpperCamelCase`` while
the code read ``mid``/``left_sum``/... — restored here with working names.
"""
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint


def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    """Return ``(start, end, total)`` of the maximum-sum contiguous subarray
    of ``arr[low..high]`` (indices inclusive).

    An empty ``arr`` yields ``(None, None, 0)``.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(
    arr: Sequence[float], low: int, mid: int, high: int
) -> tuple[int, int, float]:
    """Best subarray that crosses ``mid``: best left tail + best right head."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    """Time one run of ``max_subarray`` on a random array of ``input_size``."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def benchmark() -> None:
    """Plot runtime vs. input size. Imports matplotlib lazily so that the
    algorithm itself stays importable without the plotting dependency."""
    from matplotlib import pyplot as plt

    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
53
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]: """simple docstring""" if split_mlp_wi: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __UpperCamelCase = (wi_a, wi_a) else: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __UpperCamelCase = 
params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str: """simple docstring""" return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) __UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , __lowercase ) __UpperCamelCase = collections.OrderedDict() # Shared embeddings. __UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , __lowercase , 'encoder' ).T __UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'encoder' ).T __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 2 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T __UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int: """simple docstring""" __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) __UpperCamelCase = state_dict['shared.weight'] return state_dict def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase ) __UpperCamelCase = convert_tax_to_pytorch( __lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase ) __UpperCamelCase = make_state_dict(__lowercase , __lowercase ) model.load_state_dict(__lowercase , strict=__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]: """simple docstring""" __UpperCamelCase = MTaConfig.from_json_file(__lowercase ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCamelCase = UMTaEncoderModel(__lowercase ) else: __UpperCamelCase = UMTaForConditionalGeneration(__lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__lowercase ) # Verify that we can load the checkpoint. 
model.from_pretrained(__lowercase ) print('Done' ) if __name__ == "__main__": a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) a__ : List[str] =parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
53
1
'''simple docstring''' import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params a__ : int =[ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ['''memory_attention''', '''encoder_attn'''], ['''attention''', '''attn'''], ['''/''', '''.'''], ['''.LayerNorm.gamma''', '''_layer_norm.weight'''], ['''.LayerNorm.beta''', '''_layer_norm.bias'''], ['''r.layer_''', '''r.layers.'''], ['''output_proj''', '''out_proj'''], ['''ffn.dense_1.''', '''fc2.'''], ['''ffn.dense.''', '''fc1.'''], ['''ffn_layer_norm''', '''final_layer_norm'''], ['''kernel''', '''weight'''], ['''encoder_layer_norm.''', '''encoder.layer_norm.'''], ['''decoder_layer_norm.''', '''decoder.layer_norm.'''], ['''embeddings.weights''', '''shared.weight'''], ] def lowercase__ ( __lowercase : Dict ) -> Tuple: """simple docstring""" for pegasus_name, hf_name in PATTERNS: __UpperCamelCase = k.replace(__lowercase , __lowercase ) return k def lowercase__ ( __lowercase : dict , __lowercase : dict ) -> PegasusForConditionalGeneration: """simple docstring""" __UpperCamelCase = DEFAULTS.copy() cfg_kwargs.update(__lowercase ) __UpperCamelCase = PegasusConfig(**__lowercase ) __UpperCamelCase = PegasusForConditionalGeneration(__lowercase ) __UpperCamelCase = torch_model.model.state_dict() __UpperCamelCase = {} for k, v in tf_weights.items(): __UpperCamelCase = rename_state_dict_key(__lowercase ) if new_k not in sd: raise ValueError(F'''could not find new key {new_k} in state dict. 
(converted from {k})''' ) if "dense" in k or "proj" in new_k: __UpperCamelCase = v.T __UpperCamelCase = torch.tensor(__lowercase , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}''' # make sure embedding.padding_idx is respected __UpperCamelCase = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) __UpperCamelCase = mapping['shared.weight'] __UpperCamelCase = mapping['shared.weight'] __UpperCamelCase = {k: torch.zeros_like(__lowercase ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**__lowercase ) __UpperCamelCase , __UpperCamelCase = torch_model.model.load_state_dict(__lowercase , strict=__lowercase ) __UpperCamelCase = [ k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight'] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def lowercase__ ( __lowercase : Optional[Any]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict: """simple docstring""" __UpperCamelCase = tf.train.list_variables(__lowercase ) __UpperCamelCase = {} __UpperCamelCase = ['Adafactor', 'global_step'] for name, shape in tqdm(__lowercase , desc='converting tf checkpoint to dict' ): __UpperCamelCase = any(pat in name for pat in ignore_name ) if skip_key: continue __UpperCamelCase = tf.train.load_variable(__lowercase , __lowercase ) __UpperCamelCase = array return tf_weights def lowercase__ ( __lowercase : str , __lowercase : str ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = Path(__lowercase ).parent.name __UpperCamelCase = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings'] __UpperCamelCase = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=__lowercase ) assert tok.model_max_length == desired_max_model_length 
tok.save_pretrained(__lowercase ) # convert model __UpperCamelCase = get_tf_weights_as_numpy(__lowercase ) __UpperCamelCase = task_specific_params[F'''summarization_{dataset}'''] if dataset == "large": __UpperCamelCase = task_specific_params __UpperCamelCase = convert_pegasus(__lowercase , __lowercase ) torch_model.save_pretrained(__lowercase ) __UpperCamelCase = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(__lowercase , Path(__lowercase ) / 'pytorch_model.bin' ) if __name__ == "__main__": a__ : Union[str, Any] =argparse.ArgumentParser() # Required parameters parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') a__ : int =parser.parse_args() if args.save_dir is None: a__ : str =Path(args.tf_ckpt_path).parent.name a__ : str =os.path.join('''pegasus''', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
53
"""Blip processor: wraps a Blip image processor and a BERT tokenizer into a
single callable, mirroring the upstream transformers BlipProcessor.

The obfuscated original subclassed the undefined name ``__lowerCamelCase``,
renamed the ProcessorMixin hook attributes to ``SCREAMING_SNAKE_CASE_`` and
gave all four methods the same name ``_lowerCamelCase`` — restored here.
"""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case(ProcessorMixin):
    """Combined image+text processor for BLIP models."""

    # Hook attributes read by ProcessorMixin to wire up the sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP's text encoder does not use token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a BatchEncoding with text fields, pixel_values, or both.
        Raises ValueError when neither input is given.
        """
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving & deduped.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
53
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' ) __UpperCamelCase = AutoTokenizer.from_pretrained('google/mt5-small' ) __UpperCamelCase = tokenizer('Hello there' , return_tensors='tf' ).input_ids __UpperCamelCase = tokenizer('Hi I am' , return_tensors='tf' ).input_ids __UpperCamelCase = model(__A , labels=__A ).loss __UpperCamelCase = -tf.math.reduce_mean(__A ).numpy() __UpperCamelCase = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
53
"""Singly linked list node with loop (cycle) detection.

The obfuscated original defined two classes both named ``snake_case`` (one
subclassing the undefined ``__lowerCamelCase``) while the bodies referenced
``ContainsLoopError`` and ``Node`` — restored here.
"""
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    """Raised when iterating a linked list that loops back on itself."""


class Node:
    """A linked-list node; iterating yields data until the end or a loop."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node: Node | None = self
        visited: list[Node] = []  # identity-based membership (no __eq__)
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True iff following ``next_node`` links revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


# Backward-compat alias: the obfuscated original's last class binding.
snake_case = Node


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False (equal data, distinct nodes)

    root_node = Node(1)
    print(root_node.has_loop)  # False
53
1
"""Feature-extraction pipeline: returns the model's hidden states for a text.

The obfuscated original subclassed the undefined ``__lowerCamelCase`` (the
real base, ``Pipeline``, is imported below) and named all four hook methods
``_lowerCamelCase`` so they shadowed each other; the Pipeline base class
dispatches on ``_sanitize_parameters``/``preprocess``/``_forward``/
``postprocess``, restored here.
"""
from typing import Dict

from .base import GenericTensor, Pipeline


class snake_case(Pipeline):
    """Extracts last_hidden_state / logits features from a text input."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split user kwargs into preprocess / forward / postprocess params."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)'
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize the raw text into framework tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first output tensor, as a tensor or nested lists."""
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
53
"""Rabin-Karp substring search with a rolling hash.

The obfuscated original bound both functions to ``lowercase__`` (the second
clobbered the first) and the constants to ``a__`` while the bodies read
``alphabet_size``/``modulus``/``rabin_karp`` — restored here.
"""

alphabet_size = 256  # Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True iff ``pattern`` occurs in ``text``.

    Uses a base-256 rolling hash modulo a prime; equal hashes are confirmed
    with a direct string comparison, so no false positives.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # alphabet_size ** (p_len - 1), built incrementally mod `modulus`
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Self-test covering matches, misses, and non-ASCII input."""
    # Test 1)
    pattern = 'abc1abc12'
    text_a = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text_b = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)

    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)

    print('Success.')


if __name__ == "__main__":
    test_rabin_karp()
53
1
"""Instantiate a FlaxVisionEncoderDecoderModel from separately pretrained
encoder (vision) and decoder (language) checkpoints and save the combined
model plus its image processor and tokenizer."""

from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class ModelArguments:
    # Where the assembled encoder-decoder model and its processors are written.
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    """Parse CLI arguments, build the joint model and persist everything."""
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    # Decoder may lack a pad token; reuse the token behind the configured pad id.
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)


if __name__ == "__main__":
    main()
53
"""A self-contained matrix class supporting arithmetic, determinants,
cofactors, adjugates and inverses for int/float matrices."""

from __future__ import annotations


class Matrix:
    """An m x n matrix stored as a list of row lists of ints/floats."""

    def __init__(self, rows: list[list[int]]) -> None:
        # All rows must be non-empty, equally sized, and numeric.
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        """Return the columns of the matrix as a list of lists."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) pair."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Identity matrix of the same order as this (square) matrix."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant via Laplace expansion along the first row.

        Returns 0 for non-square matrices (no exception raised).
        """
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        # (sic) spelling kept: non-zero determinant <=> invertible.
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor: (-1)**(row+column) * minor."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        # NOTE: scalar __mul__ truncates to int, so inverses are integer-rounded.
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append `row`, or insert it at `position` when given."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append `column`, or insert it at `position` when given."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # Scalar multiply; results are truncated to int.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
1
"""BEiT model configuration (plus its ONNX export configuration)."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    """Stores the hyperparameters of a BEiT model as config attributes.

    Defaults mirror microsoft/beit-base-patch16-224-pt22k.
    """

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the single pixel_values input.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating the exported model against PyTorch.
        return 1e-4
53
"""Deduplicate identical initializer tensors in an ONNX model and rewrite
all graph references to point at the single kept copy."""

import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called `name` on `node_proto` with `new_name`,
    recursing into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # insert-then-pop keeps the input's position stable.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node of `graph_proto`."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers (index pairs (dup, kept)) from
    `model_without_ext` and repoint all references to the kept tensor."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load the model at `onnx_file_path`, merge duplicate initializers and
    save an `optimized_<name>` copy next to it. Returns the new file path."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Account for saved bytes: 1=float32, 6=int32, 7=int64, 11=double.
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                # (j, i): remove j, keep i (requires j > i, checked later).
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
53
1
"""Unit tests for the Pix2Struct image processor (3- and 4-channel inputs)."""

import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    """Holds the shared fixture parameters used by the processor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        # Fixed reference image so the mean-value regression check is stable.
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        # Regression value for the reference image at 2048 patches.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        # +2 accounts for the prepended (row, col) coordinate features.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        # In VQA mode, calling without header_text must raise.
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        # RGBA inputs are converted down to 3 channels by the processor.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        # num_channels - 1: the alpha channel is dropped on conversion.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
53
"""Quickselect: find the k-th smallest element of a list in expected O(n)."""

import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of `data` around `pivot`.

    Returns (less, equal, greater) lists.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would be at position `index` (0-based) in
    sorted(items), or None when `index` is out of range.

    Uses a random pivot, so expected linear time regardless of input order.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]

    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
53
1
"""SentencePiece-based tokenizer for MBart with fairseq vocab alignment and
language-code special tokens (suffix = [eos, lang_code])."""

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    """MBart tokenizer backed by a SentencePiece model.

    Source sequences are encoded as ``tokens </s> src_lang_code``; target
    sequences as ``tokens </s> tgt_lang_code``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 for special tokens (prefix/suffix), 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """MBart does not use token type ids; always return zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string pieces) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from serialized proto): dump it.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target-lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
53
"""Convert a fairseq mBART checkpoint on disk into a Hugging Face
`MBartForConditionalGeneration` model and save it.

The original block could not run: all three functions were defined under the
same mangled name with duplicate `__lowercase` parameters (a SyntaxError),
while the call site referenced `convert_fairseq_mbart_checkpoint_from_disk`,
`remove_ignore_keys_` and `make_linear_from_emb` — restored here.
"""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq-only bookkeeping entries from *state_dict* in place.

    These keys have no counterpart in the Hugging Face architecture.
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        # `None` default: absent keys are simply skipped.
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free ``nn.Linear`` whose weight is tied to *emb*.

    Used to tie the LM head to the shared token-embedding table.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq checkpoint and port its weights into an MBart model.

    Args:
        checkpoint_path: path to the fairseq ``model.pt`` on disk.
        hf_config_path: Hugging Face config the target model is based on.
        finetuned: if True, tie the LM head to the shared embeddings.
        mbart_50: if True (together with ``finetuned``), switch the
            activation function to ``relu`` as used by mBART-50 checkpoints.

    Returns:
        The populated ``MBartForConditionalGeneration`` model.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    # The checkpoint's embedding table determines the vocabulary size.
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    # The shared embedding table is taken from the decoder side.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
53
1
"""Fast (Rust-backed) tokenizer for the BLOOM family of models."""
# NOTE(review): an obfuscation pass renamed module constants to `a__` and the
# class to `snake_case`; the class body still reads the original names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, pre_tok_state, input_ids...),
# so those names would need restoring for this module to run.
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

a__ : Optional[Any] =logging.get_logger(__name__)

# Expected backend file for the fast tokenizer.
a__ : List[Any] ={'''tokenizer_file''': '''tokenizer.json'''}

# Canonical tokenizer.json locations for the published BLOOM checkpoints.
a__ : Dict ={
    '''tokenizer_file''': {
        '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
        '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
        '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
        '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
        '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
        '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
    },
}


class snake_case ( __lowerCamelCase ):
    """Fast BLOOM tokenizer backed by the `tokenizers` library."""

    SCREAMING_SNAKE_CASE_ : List[str] =VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE_ : Any =PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE_ : Optional[Any] =["input_ids", "attention_mask"]
    SCREAMING_SNAKE_CASE_ : Any =None  # no slow-tokenizer counterpart

    def __init__( self : Optional[Any] , __A : Any=None , __A : Optional[Any]=None , __A : Optional[Any]=None , __A : int="<unk>" , __A : int="<s>" , __A : Optional[int]="</s>" , __A : Union[str, Any]="<pad>" , __A : List[str]=False , __A : int=False , **__A : List[str] , ):
        # Forward everything to the fast-tokenizer base class, then make the
        # backend pre-tokenizer agree with the requested `add_prefix_space`.
        super().__init__(
            __A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , add_prefix_space=__A , clean_up_tokenization_spaces=__A , **__A , )
        __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , __A ) != add_prefix_space:
            # Rebuild the pre-tokenizer of the same type with the new setting.
            __UpperCamelCase = getattr(__A , pre_tok_state.pop('type' ) )
            __UpperCamelCase = add_prefix_space
            __UpperCamelCase = pre_tok_class(**__A )
        __UpperCamelCase = add_prefix_space

    def _lowerCamelCase ( self : Dict , *__A : int , **__A : List[str] ):
        # Batched encoding: pretokenized input requires add_prefix_space=True.
        __UpperCamelCase = kwargs.get('is_split_into_words' , __A )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ' pretokenized inputs.' )
        return super()._batch_encode_plus(*__A , **__A )

    def _lowerCamelCase ( self : Any , *__A : List[str] , **__A : List[Any] ):
        # Single-example encoding: same pretokenized-input constraint as above.
        __UpperCamelCase = kwargs.get('is_split_into_words' , __A )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ' pretokenized inputs.' )
        return super()._encode_plus(*__A , **__A )

    def _lowerCamelCase ( self : Tuple , __A : str , __A : Optional[str] = None ):
        # Save the backend tokenizer model files; returns their paths.
        __UpperCamelCase = self._tokenizer.model.save(__A , name=__A )
        return tuple(__A )

    def _lowerCamelCase ( self : str , __A : "Conversation" ):
        # Build chat input ids: each turn followed by EOS, truncated on the
        # left to the model's maximum length.
        __UpperCamelCase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
        if len(__A ) > self.model_max_length:
            __UpperCamelCase = input_ids[-self.model_max_length :]
        return input_ids
53
"""Evaluate a causal-LM checkpoint (CodeParrot-style) on a streamed dataset:
computes the mean loss and perplexity with an `accelerate`-prepared model."""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


# NOTE(review): an obfuscation pass renamed assignment targets to
# `__UpperCamelCase` / `a__` and the class to `snake_case`, while later
# statements still read the intended names (`buffer`, `losses`, `args`, ...).
# Comments describe the intended behaviour.
class snake_case ( __lowerCamelCase ):
    """Iterable dataset that packs tokenized texts into fixed-length chunks."""

    def __init__( self : Any , __A : Dict , __A : str , __A : List[Any]=1_0_2_4 , __A : Tuple=1_0_2_4 , __A : str=3.6 ):
        # Intended attributes: tokenizer, concat_token_id (BOS used as a
        # separator between documents), dataset, seq_length, and the number
        # of raw characters buffered per refill
        # (seq_length * chars_per_token * num_of_sequences).
        __UpperCamelCase = tokenizer
        __UpperCamelCase = tokenizer.bos_token_id
        __UpperCamelCase = dataset
        __UpperCamelCase = seq_length
        __UpperCamelCase = seq_length * chars_per_token * num_of_sequences

    def __iter__( self : Any ):
        __UpperCamelCase = iter(self.dataset )
        __UpperCamelCase = True
        while more_examples:
            # Fill a character buffer from the stream, tokenize it, then emit
            # every full seq_length window as a tensor (a trailing partial
            # window is dropped).
            __UpperCamelCase , __UpperCamelCase = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(__A )['content'] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    __UpperCamelCase = False
                    break
            # NOTE(review): reads the module-level `tokenizer` rather than
            # `self.tokenizer` — works only because this script defines one.
            __UpperCamelCase = tokenizer(__A , truncation=__A )['input_ids']
            __UpperCamelCase = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(__A ) , self.seq_length ):
                __UpperCamelCase = all_token_ids[i : i + self.seq_length]
                if len(__A ) == self.seq_length:
                    yield torch.tensor(__A )


def lowercase__ ( __lowercase : Optional[Any] ) -> Union[str, Any]:
    """Build the streaming evaluation dataloader from the parsed arguments."""
    __UpperCamelCase = {'streaming': True}
    __UpperCamelCase = load_dataset(args.dataset_name , split='train' , **__lowercase )
    __UpperCamelCase = ConstantLengthDataset(__lowercase , __lowercase , seq_length=args.seq_length )
    __UpperCamelCase = DataLoader(__lowercase , batch_size=args.batch_size )
    return eval_dataloader


def lowercase__ ( __lowercase : Tuple ) -> Optional[Any]:
    """Run evaluation; return (mean loss, perplexity), inf on overflow."""
    model.eval()
    __UpperCamelCase = []
    for step, batch in enumerate(__lowercase ):
        with torch.no_grad():
            __UpperCamelCase = model(__lowercase , labels=__lowercase )
        # Repeat the per-batch loss so gathering weights each sample equally.
        __UpperCamelCase = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(__lowercase ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    __UpperCamelCase = torch.mean(torch.cat(__lowercase ) )
    try:
        __UpperCamelCase = torch.exp(__lowercase )
    except OverflowError:
        __UpperCamelCase = float('inf' )
    return loss.item(), perplexity.item()


# Setup Accelerator
a__ : int =Accelerator()

# Parse configuration
a__ : Dict =HfArgumentParser(EvaluationArguments)
a__ : Union[str, Any] =parser.parse_args()
set_seed(args.seed)

# Logging
a__ : List[Any] =logging.getLogger(__name__)
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)

# Load model and tokenizer
a__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt)
a__ : List[Any] =AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
a__ : Union[str, Any] =create_dataloader(args)

# Prepare everything with our `accelerator`.
a__ , a__ : List[str] =accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
a__ , a__ : Any =evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
53
1
"""Convert an XLM-RoBERTa-XL fairseq checkpoint into the Transformers format,
verifying the ported model reproduces the original's outputs."""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging

# The fairseq model/config layout used below requires fairseq >= 1.0.0a.
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
    raise Exception('''requires fairseq >= 1.0.0a''')

logging.set_verbosity_info()
a__ : int =logging.get_logger(__name__)

# Fixed sample text used to compare original vs. converted model outputs.
a__ : Union[str, Any] ='''Hello world! cécé herlolip'''


# NOTE(review): an obfuscation pass renamed every assignment target to
# `__UpperCamelCase` / `a__` while later statements read the intended names
# (`roberta`, `model`, `layer`, ...); comments describe the intended flow.
def lowercase__ ( __lowercase : str , __lowercase : str , __lowercase : bool ) -> Tuple:
    """Intended: convert_xlm_roberta_xl_checkpoint_to_pytorch — copy weights
    from a fairseq RoBERTa checkpoint into an XLM-RoBERTa-XL model, check the
    two produce the same outputs, and save the converted model."""
    __UpperCamelCase = FairseqRobertaModel.from_pretrained(__lowercase )
    roberta.eval()  # disable dropout
    __UpperCamelCase = roberta.model.encoder.sentence_encoder
    # Build the target config from the source model's hyper-parameters.
    __UpperCamelCase = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
    if classification_head:
        __UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:' , __lowercase )

    __UpperCamelCase = XLMRobertaXLForSequenceClassification(__lowercase ) if classification_head else XLMRobertaXLForMaskedLM(__lowercase )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    __UpperCamelCase = roberta_sent_encoder.embed_tokens.weight
    __UpperCamelCase = roberta_sent_encoder.embed_positions.weight
    __UpperCamelCase = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    __UpperCamelCase = roberta_sent_encoder.layer_norm.weight
    __UpperCamelCase = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        __UpperCamelCase = model.roberta.encoder.layer[i]
        __UpperCamelCase = roberta_sent_encoder.layers[i]

        __UpperCamelCase = layer.attention
        __UpperCamelCase = roberta_layer.self_attn_layer_norm.weight
        __UpperCamelCase = roberta_layer.self_attn_layer_norm.bias

        # self attention
        __UpperCamelCase = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        __UpperCamelCase = roberta_layer.self_attn.q_proj.weight
        __UpperCamelCase = roberta_layer.self_attn.q_proj.bias
        __UpperCamelCase = roberta_layer.self_attn.k_proj.weight
        __UpperCamelCase = roberta_layer.self_attn.k_proj.bias
        __UpperCamelCase = roberta_layer.self_attn.v_proj.weight
        __UpperCamelCase = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        __UpperCamelCase = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        __UpperCamelCase = roberta_layer.self_attn.out_proj.weight
        __UpperCamelCase = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        __UpperCamelCase = roberta_layer.final_layer_norm.weight
        __UpperCamelCase = roberta_layer.final_layer_norm.bias

        # intermediate
        __UpperCamelCase = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        __UpperCamelCase = roberta_layer.fca.weight
        __UpperCamelCase = roberta_layer.fca.bias

        # output
        __UpperCamelCase = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        __UpperCamelCase = roberta_layer.fca.weight
        __UpperCamelCase = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        __UpperCamelCase = roberta.model.classification_heads['mnli'].dense.weight
        __UpperCamelCase = roberta.model.classification_heads['mnli'].dense.bias
        __UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight
        __UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        __UpperCamelCase = roberta.model.encoder.lm_head.dense.weight
        __UpperCamelCase = roberta.model.encoder.lm_head.dense.bias
        __UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.weight
        __UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.bias
        __UpperCamelCase = roberta.model.encoder.lm_head.weight
        __UpperCamelCase = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    __UpperCamelCase = roberta.encode(__lowercase ).unsqueeze(0 )  # batch of size 1

    __UpperCamelCase = model(__lowercase )[0]
    if classification_head:
        __UpperCamelCase = roberta.model.classification_heads['mnli'](roberta.extract_features(__lowercase ) )
    else:
        __UpperCamelCase = roberta.model(__lowercase )[0]
    print(our_output.shape , their_output.shape )
    __UpperCamelCase = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    __UpperCamelCase = torch.allclose(__lowercase , __lowercase , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )

    pathlib.Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowercase )


if __name__ == "__main__":
    a__ : int =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    a__ : List[Any] =parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
53
"""GPT-Neo model configuration, torch helper functions used by the model, and
the ONNX export configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging

a__ : Any =logging.get_logger(__name__)

a__ : Optional[Any] ={
    '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


# NOTE(review): an obfuscation pass renamed assignment targets to
# `__UpperCamelCase` / `a__` and classes to `snake_case`, while later
# statements read the intended names; comments describe intended behaviour.
class snake_case ( __lowerCamelCase ):
    """Configuration for GPT-Neo models (attention layout per layer is
    derived from `attention_types`)."""

    SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo"
    SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"]
    SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ):
        __UpperCamelCase = vocab_size
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_layers
        __UpperCamelCase = num_heads
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = window_size
        __UpperCamelCase = activation_function
        __UpperCamelCase = resid_dropout
        __UpperCamelCase = embed_dropout
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = classifier_dropout
        __UpperCamelCase = layer_norm_epsilon
        __UpperCamelCase = initializer_range
        __UpperCamelCase = use_cache
        __UpperCamelCase = bos_token_id
        __UpperCamelCase = eos_token_id
        __UpperCamelCase = attention_types
        # Expand [[types, count], ...] into one attention type per layer and
        # validate the result against num_layers.
        __UpperCamelCase = self.expand_attention_types_params(__A )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )

    @staticmethod
    def _lowerCamelCase ( __A : Tuple ):
        # Intended: expand_attention_types_params — flatten the
        # [[types, repeat], ...] spec into a per-layer list.
        __UpperCamelCase = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions


def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any:
    """Intended: custom_unfold — torch.Tensor.unfold along `dimension` with the
    given `size` and `step`, implemented with basic ops (ONNX-exportable)."""
    import torch

    __UpperCamelCase = input.size()
    __UpperCamelCase = len(__lowercase )
    __UpperCamelCase = shape[dimension]
    __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase )
    __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1
    __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None]
    __UpperCamelCase = [slice(__lowercase )] * rank
    __UpperCamelCase = indices
    __UpperCamelCase = input[s]

    __UpperCamelCase = list(range(0 , rank + 1 ) )
    # Move the new window dimension to the end, matching unfold's layout.
    perm.append(perm.pop(dimension + 1 ) )

    return sliced.permute(__lowercase )


def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]:
    """Intended: custom_get_block_length_and_num_blocks — largest divisor of
    `seq_length` not exceeding `window_size`, and the resulting block count."""
    import torch

    __UpperCamelCase = torch.arange(1 , __lowercase )
    __UpperCamelCase = torch.remainder(__lowercase , __lowercase )
    __UpperCamelCase = remainders == 0
    __UpperCamelCase = candidates[divisor_indices]
    __UpperCamelCase = torch.max(__lowercase )
    return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' )


class snake_case ( __lowerCamelCase ):
    """ONNX export configuration for GPT-Neo (with past-key-values support)."""

    @property
    def _lowerCamelCase ( self : Tuple ):
        # Intended: inputs — dynamic-axes spec for input_ids/attention_mask,
        # extended with past key/values when use_past is enabled.
        __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(__A , direction='inputs' )
            __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            __UpperCamelCase = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def _lowerCamelCase ( self : int ):
        return self._config.num_heads

    def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
        # Intended: generate_dummy_inputs — base inputs plus zeroed
        # past_key_values and an extended attention mask.
        __UpperCamelCase = super(__A , self ).generate_dummy_inputs(
            __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )

        # We need to order the input in the way they appears in the forward()
        __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                __UpperCamelCase = seqlen + 2
                __UpperCamelCase = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                __UpperCamelCase = [
                    (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
                ]

        __UpperCamelCase = common_inputs['attention_mask']
        if self.use_past:
            __UpperCamelCase = ordered_inputs['attention_mask'].dtype
            __UpperCamelCase = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 )

        return ordered_inputs

    @property
    def _lowerCamelCase ( self : Dict ):
        # Intended: default_onnx_opset.
        return 1_3
53
1
"""Funnel Transformer model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

a__ : List[str] =logging.get_logger(__name__)

a__ : Optional[int] ={
    '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
    '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
    '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
    '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
    '''funnel-transformer/intermediate''': (
        '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
    ),
    '''funnel-transformer/intermediate-base''': (
        '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
    ),
    '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
    '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
    '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
    '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}


# NOTE(review): obfuscation renamed assignment targets to `__UpperCamelCase`
# and decorator targets: `@num_hidden_layers.setter` / `@num_blocks.setter`
# reference property names the mangled methods no longer carry, so the class
# body cannot execute as written; comments describe the intended behaviour.
class snake_case ( __lowerCamelCase ):
    """Configuration for Funnel Transformer models."""

    SCREAMING_SNAKE_CASE_ : str ="funnel"
    SCREAMING_SNAKE_CASE_ : Dict ={
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__( self : int , __A : Any=3_0_5_2_2 , __A : List[Any]=[4, 4, 4] , __A : str=None , __A : Optional[int]=2 , __A : Tuple=7_6_8 , __A : Dict=1_2 , __A : Optional[int]=6_4 , __A : Dict=3_0_7_2 , __A : Optional[int]="gelu_new" , __A : Any=0.1 , __A : int=0.1 , __A : Union[str, Any]=0.0 , __A : Optional[Any]=0.1 , __A : Tuple=None , __A : Optional[int]=1e-9 , __A : Any="mean" , __A : List[str]="relative_shift" , __A : List[Any]=True , __A : List[str]=True , __A : List[str]=True , **__A : Optional[int] , ):
        __UpperCamelCase = vocab_size
        __UpperCamelCase = block_sizes
        # Default: each block repeated once.
        __UpperCamelCase = [1] * len(__A ) if block_repeats is None else block_repeats
        assert len(__A ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        __UpperCamelCase = num_decoder_layers
        __UpperCamelCase = d_model
        __UpperCamelCase = n_head
        __UpperCamelCase = d_head
        __UpperCamelCase = d_inner
        __UpperCamelCase = hidden_act
        __UpperCamelCase = hidden_dropout
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = activation_dropout
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_std
        __UpperCamelCase = layer_norm_eps
        # Only these pooling / attention variants are implemented.
        assert pooling_type in [
            "mean",
            "max",
        ], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
        __UpperCamelCase = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
        __UpperCamelCase = attention_type
        __UpperCamelCase = separate_cls
        __UpperCamelCase = truncate_seq
        __UpperCamelCase = pool_q_only

        super().__init__(**__A )

    @property
    def _lowerCamelCase ( self : int ):
        # Intended: num_hidden_layers — derived from block_sizes.
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def _lowerCamelCase ( self : List[Any] , __A : List[Any] ):
        # Derived value: must be changed through `block_sizes`.
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )

    @property
    def _lowerCamelCase ( self : Optional[Any] ):
        # Intended: num_blocks — derived from block_sizes.
        return len(self.block_sizes )

    @num_blocks.setter
    def _lowerCamelCase ( self : Any , __A : int ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
53
"""Agent tool that answers questions about document images using a Donut
(vision encoder-decoder) model."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


# NOTE(review): obfuscation renamed assignment targets to `__UpperCamelCase`
# and the class to `snake_case`; later statements read the intended names
# (`task_prompt`, `inputs`, `sequence`, ...).
class snake_case ( __lowerCamelCase ):
    """PipelineTool wrapping a Donut DocVQA checkpoint: encode (image,
    question) pairs, generate, then decode the answer."""

    SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa"
    SCREAMING_SNAKE_CASE_ : Dict =(
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    SCREAMING_SNAKE_CASE_ : List[str] ="document_qa"
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel

    SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"]
    SCREAMING_SNAKE_CASE_ : Any =["text"]

    def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ):
        # Pillow is needed to handle the input image.
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )

        super().__init__(*__A , **__A )

    def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ):
        # Intended: encode — build the Donut DocVQA task prompt around the
        # user's question and preprocess the image to pixel values.
        __UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        __UpperCamelCase = task_prompt.replace('{user_input}' , __A )
        __UpperCamelCase = self.pre_processor.tokenizer(
            __A , add_special_tokens=__A , return_tensors='pt' ).input_ids
        __UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ):
        # Intended: forward — greedy generation (num_beams=1), banning the
        # unk token and capping at the decoder's maximum position count.
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences

    def _lowerCamelCase ( self : Tuple , __A : List[Any] ):
        # Intended: decode — strip special tokens and the task prefix, parse
        # the Donut output into JSON, and return the answer field.
        __UpperCamelCase = self.pre_processor.batch_decode(__A )[0]
        __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        __UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip()  # remove first task start token
        __UpperCamelCase = self.pre_processor.tokenajson(__A )

        return sequence["answer"]
53
1
"""Low-level keyboard helpers: read single raw keystrokes from the terminal
on both Windows (msvcrt) and POSIX (termios/tty) and map them to key codes.

The original block could not run: the module constants were all assigned to
the mangled name `a__` while the functions read `ARROW_KEY_FLAG`, `KEYMAP`,
`WIN_CH_BUFFER` and `WIN_KEYMAP`, and the second function called
`get_raw_chars`, which was never defined — names restored here.
"""
import os
import string
import sys


# Flag OR-ed into arrow-key codes so they cannot collide with plain chars.
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 27,
    'up': 65 + ARROW_KEY_FLAG,
    'down': 66 + ARROW_KEY_FLAG,
    'right': 67 + ARROW_KEY_FLAG,
    'left': 68 + ARROW_KEY_FLAG,
    'mod_int': 91,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 50,
    'delete': 51,
    'pg_up': 53,
    'pg_down': 54,
}

# The arrow keys occupy the contiguous code range [up, left].
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']

if sys.platform == "win32":
    # Pending translated keystrokes (Windows reports some keys as two bytes).
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP['up'] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP['up'] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP['down'] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP['down'] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP['right'] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP['right'] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP['left'] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP['left'] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Read one raw keystroke (possibly multi-byte escape prefix) from stdin."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP['insert'] - 1 << 9,
                        KEYMAP['delete'] - 1 << 9,
                        KEYMAP['pg_up'] - 1 << 9,
                        KEYMAP['pg_down'] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP['esc'])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            # Raw mode: deliver each keypress immediately, without echo.
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the previous terminal settings.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Return the next key press: a printable char, an arrow-key code string,
    or KEYMAP['undefined'] for anything unrecognised."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP['interrupt'], KEYMAP['newline']]:
        return char

    elif ord(char) == KEYMAP['esc']:
        # Possible escape sequence: ESC [ <key>.
        combo = get_raw_chars()
        if ord(combo) == KEYMAP['mod_int']:
            key = get_raw_chars()
            if ord(key) >= KEYMAP['arrow_begin'] - ARROW_KEY_FLAG and ord(key) <= KEYMAP['arrow_end'] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP['undefined']
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP['undefined']
53
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
1
'''simple docstring''' def lowercase__ ( __lowercase : int = 50000000 ) -> int: """simple docstring""" __UpperCamelCase = set() __UpperCamelCase = int((limit - 24) ** (1 / 2) ) __UpperCamelCase = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , __lowercase ) ) ) for primea in primes: __UpperCamelCase = primea * primea for primea in primes: __UpperCamelCase = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: __UpperCamelCase = primea * primea * primea * primea __UpperCamelCase = square + cube + tetr if total >= limit: break ret.add(__lowercase ) return len(__lowercase ) if __name__ == "__main__": print(f'{solution() = }')
53
"""Parquet reader/writer glue for the `datasets` library."""
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return a parquet row-group size adapted to the dataset's features.

    Datasets with image/audio/binary columns get smaller row groups so each
    row group stays reasonably sized on disk. Returns None when the default
    batch size should be used.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    """Read parquet file(s) into a `Dataset` (or a streaming `IterableDataset`)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize single path(s) into the {split_name: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or fully prepared)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    """Write a `Dataset` to a parquet file path or binary file object."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Feature-aware default keeps row groups small for blob-like columns.
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """Write the whole dataset; return the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Stream the underlying Arrow table to `file_obj` in `batch_size` slices."""
        written = 0
        # `path_or_buf` must not leak into the pyarrow writer kwargs.
        parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
53
1
"""Generate a Markdown directory index linking every .py/.ipynb file."""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield relative paths of all .py/.ipynb files under `top_dir`, skipping
    `scripts/`, hidden/underscore-prefixed directories, and `__init__.py`."""
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk does not descend into excluded dirs.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """Return the Markdown list prefix for nesting level `i` (heading at 0)."""
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print headings for the path components that changed; return `new_path`."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """Print the full Markdown index for `top_dir` to stdout."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
53
"""Tests for `SplitDict` YAML round-tripping and `SplitInfo` serialization."""
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """Round-trip a SplitDict through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    # One YAML entry per split.
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """`asdict` must keep the (deprecated) dataset_name field on SplitInfo."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
1
"""Project Euler 25: index of the first Fibonacci number with n digits."""


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (1-indexed: fib(1)=0, fib(2)=1).

    Non-int input returns 0, matching the original guard.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the smallest index whose Fibonacci number has `n` decimal digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25 entry point."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
53
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[str] ={ '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any =[ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys a__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
"""Palindrome-number check by decimal digit reversal."""


def lowercase__(num: int) -> bool:
    """Return True if the integer `num` reads the same reversed.

    Negative numbers are never palindromes (the sign has no mirror image).
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    # Peel decimal digits off `num` and push them onto `rev_num`.
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
"""Kaldi-style log-mel filter-bank feature extraction with optional
utterance-level cepstral mean/variance normalization (CMVN)."""
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

# `__call__` warns through this module-level logger.
logger = logging.get_logger(__name__)


class snake_case(SequenceFeatureExtractor):
    """Speech feature extractor producing padded `input_features` (+ mask).

    NOTE(review): the mangled class name is kept as-is; presumably this is
    transformers' Speech2TextFeatureExtractor — confirm before renaming.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # Padding always produces an attention mask; normalize() consumes it.
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """Compute Kaldi-compatible fbank features for one mono waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Mean/variance-normalize `x` over its first `input_length` frames;
        frames past `input_length` are reset to `padding_value`."""
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply utterance-level CMVN to every feature matrix in the batch."""
        # With a mask, normalize only over real (unpadded) frames.
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize raw audio into a padded `BatchFeature` of log-mel features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
53
1
"""Greatest common divisor via the Euclidean algorithm (iterative and recursive)."""


def euclidean_gcd(a: int, b: int) -> int:
    """Return gcd(a, b) using the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return gcd(a, b) using the recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    """Print a few worked examples of both implementations."""
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
53
"""AltCLIP model configuration classes."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping it from a composite AltCLIP config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite configuration wrapping a text and a vision sub-config."""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: "AltCLIPTextConfig", vision_config: "AltCLIPVisionConfig", **kwargs):
        """Build a composite config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
53
1
"""Demonstration of classic fuzzy-set operations with scikit-fuzzy, plotted
with matplotlib as a 4x3 grid of membership functions."""
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
53
"""Convert an original LUKE checkpoint into the Hugging Face Transformers format.

NOTE(review): identifiers in this script had been mangled — both functions were
named ``lowercase__`` and every parameter ``__lowercase`` (duplicate parameter
names are a SyntaxError in Python), while the call sites used the real names
``convert_luke_checkpoint`` / ``load_entity_vocab``. Names were restored from
those call sites; boolean literals that had been replaced by the mangled
parameter name were restored to their conventional values (e.g.
``strict=False``, ``add_prefix_space=True``) — confirm against the upstream
conversion script.
"""
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Load the original weights, adapt them to ``LukeModel``, sanity-check the
    outputs on a fixed sentence, and save model + tokenizer to
    ``pytorch_dump_folder_path``.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # BUG FIX: this condition was inverted ("!=" under "not"), which raised
    # exactly when the shape was correct; it now mirrors the word-state check.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a TSV entity vocab file (one ``title<TAB>count`` per line) into a
    ``{title: line_index}`` mapping."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
53
1
"""A small integer/float matrix class with determinant, inverse and
row/column editing.

NOTE(review): identifiers had been mangled — the class was named
``snake_case`` while internal code constructs ``Matrix(...)``, every method
was named ``_lowerCamelCase`` while call sites use the real names
(``self.num_rows``, ``self.get_minor``, ...), and several ``isinstance`` /
``str`` calls referenced the wrong mangled name (always-raising validation in
``__init__``, NameErrors in ``identity``/``__str__``). All names were restored
from the call sites and those defects fixed.
"""
from __future__ import annotations


class Matrix:
    """Matrix of ints/floats stored as a list of equal-length rows."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                # Every row must have the same width and only numeric entries.
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the matrix transposed as a list of columns."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) pair."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Return the identity matrix of the same number of rows."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant via cofactor expansion; 0 for non-square matrices."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Expand along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        # NOTE: scalar multiplication truncates to int (see __mul__), so the
        # "inverse" is integer-valued — original behaviour, kept as-is.
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append `row`, or insert it at `position` when given."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append `column`, or insert it at `position` when given."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # Scalar multiplication truncates each product to int (original
            # behaviour relied on by inverse()).
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
"""Unit tests for ``RagRetriever`` covering the canonical HF index, a custom
HF index (in memory and loaded from disk) and the legacy index format.

NOTE(review): identifiers in this file had been mangled — every method was
named ``_lowerCamelCase`` (so the class collapsed to a single surviving
method), the base class was the undefined ``__lowerCamelCase`` and locals were
``__UpperCamelCase``. Helper/attribute names below were restored from their
call sites (``self.get_dpr_tokenizer()``, ``self.tmpdirname``,
``self.retrieval_vector_size``, ...); test-method names are reconstructed and
should be confirmed against the upstream test file.
"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch

if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        # Two documents whose embeddings make the nearest neighbour unambiguous.
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
53
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available a__ : Union[str, Any] ={ '''configuration_audio_spectrogram_transformer''': [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ASTConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] =[ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ASTForAudioClassification''', '''ASTModel''', '''ASTPreTrainedModel''', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] =['''ASTFeatureExtractor'''] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys a__ : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[Any] ={ '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] =[ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys a__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
"""Conway's Game of Life rendered with matplotlib.

NOTE(review): identifiers had been mangled (all four functions were named
``lowercase__`` and module constants ``a__``); names were restored from the
call sites in the ``__main__`` block (``create_canvas``, ``seed``, ``run``,
``__judge_point``, ``usage_doc``, ``choice``). The matplotlib imports were
moved into the ``__main__`` guard so the module can be imported without a
plotting backend.
"""
import random
import sys

import numpy as np

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Return a ``size`` x ``size`` grid with every cell dead (False)."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly set each cell of ``canvas`` alive or dead, in place."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the board one generation and return the new board.

    Neighbourhoods are ``current_canvas[r-1:r+2, c-1:c+2]``; for r == 0 or
    c == 0 the ``-1`` slice start makes the slice empty, so border cells see
    no neighbours — original behaviour, kept as-is.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to one cell given its (up to 3x3) neighbourhood."""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])

    # matplotlib is only needed for the interactive display; importing it
    # lazily keeps the module importable (e.g. for tests) without a backend.
    from matplotlib import pyplot as plt
    from matplotlib.colors import ListedColormap

    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
53
"""Test utilities: env flags, skip decorators, and offline/cwd/arrow-memory context managers.

Fixes the mangled chunk: every function was named ``lowercase__`` and every
module constant ``a__``, while the call sites referenced the real names
(``parse_flag_from_env``, ``_run_slow_tests``, ``OfflineSimulationMode``,
``RequestWouldHangIndefinitelyError``), so nothing in this file resolved.
"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean env var *key*; return *default* when unset; raise on junk values."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason='test requires sndfile>=0.12.1: \'pip install "soundfile>=0.12.1"\'; ',
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip *test_case* unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip *test_case* unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip *test_case* unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip *test_case* unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip *test_case* unless PyTorch is available (per datasets config)."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip *test_case* unless TensorFlow is available (per datasets config)."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip *test_case* unless JAX is available (per datasets config)."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip *test_case* unless Pillow is available (per datasets config)."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip *test_case* unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip *test_case* unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip *test_case* unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip the test unless spacy and the given *model* load."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip *test_case* unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip *test_case* unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip *test_case* unless RUN_SLOW is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip *test_case* unless RUN_LOCAL is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip *test_case* unless RUN_PACKAGED is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip *test_case* unless RUN_REMOTE is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator: apply every given decorator to each ``test*`` method."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    # Raised by the offline simulation when a request has no timeout set.
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context.

    Depending on *mode*, requests either fail immediately, time out, or the
    ``HF_DATASETS_OFFLINE`` config flag is patched on.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the context body inside a fresh temp dir, restoring the cwd afterwards."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow's allocated memory grows inside the context."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow's allocated memory does not grow inside the context."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """True when two RNGs are in the same state, compared by drawing on deep copies.

    Assumes the arguments expose numpy's ``Generator.integers`` API — the
    originals are left untouched because the draws happen on copies.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """Decorator: turn HTTP 500/502 errors raised by the wrapped test into pytest xfails."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Flaky server-side errors should not fail CI; anything else re-raises.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


# --- async subprocess helpers ---


class _RunOutput:
    """Return code plus captured stdout/stderr lines of a finished subprocess."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """Feed *callback* one raw line at a time until *stream* is exhausted."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run *cmd*, teeing its stdout/stderr live while also capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    # NOTE(review): asyncio.wait() on bare coroutines is rejected on Python
    # 3.11+ — wrap these in asyncio.create_task() if this ever runs there.
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd* to completion and return its _RunOutput; raise on failure or silence."""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numeric pytest-xdist worker id ('gw0' -> 0); 0 when not under xdist."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port that is unique per pytest-xdist worker."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
53
1
"""Pack a seq2seq dataset so each training example uses close to max_seq_len tokens.

Reads ``<data_dir>/{train,val,test}.{source,target}``; greedily concatenates
consecutive train examples while both sides still fit under the token budget,
and copies val/test through unchanged.

Fixes the mangled module: all three functions were named ``lowercase__``
while the call sites used ``pack_examples``/``pack_data_dir``/``packer_cli``.
The heavy transformers import is deferred to the CLI entry point and tqdm is
made optional so the packing logic itself stays importable.
"""
import argparse
import shutil
from pathlib import Path

try:
    from tqdm import tqdm
except ImportError:  # graceful fallback when tqdm is not installed

    def tqdm(iterable, **kwargs):
        return iterable


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (src, tgt) pairs while both stay <= *max_tokens*.

    *tok* is any callable with the HF tokenizer interface:
    ``tok(text, return_tensors='pt').input_ids.shape[1]`` gives the length.
    Returns the packed ``(finished_src, finished_tgt)`` lists.
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the example still being built
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the train split of *data_dir* into *save_path*; copy val/test as-is."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    """CLI entry point: parse args, load the tokenizer, and pack the data dir."""
    from transformers import AutoTokenizer  # heavy import; only needed here

    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
53
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a__ : Tuple ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
53
1
"""Convert a LAVIS InstructBLIP checkpoint to the HuggingFace format and verify outputs.

Fixes the mangled module: every local write targeted the throwaway name
``__UpperCamelCase`` while subsequent reads used the real names (``q_bias``,
``qkv_bias``, ``state_dict``, ...), and the nonexistent ``TaConfig`` /
``TaTokenizerFast`` are restored to ``T5Config`` / ``T5TokenizerFast``.
"""
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    """Download and return the demo image used to sanity-check the conversion."""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    """Build (old_name, new_name) pairs mapping LAVIS weight keys to HF weight keys."""
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move key *old* to *new* in *dct* (in place)."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    """Recombine the separately-stored q/v biases into the fused qkv bias HF expects."""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict; k has no bias, hence the zeros in the middle
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name):
    """Return (InstructBlipConfig, image_size) matching *model_name*."""
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert *model_name* from LAVIS to HF, check logits agree, optionally save/push."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    # Two devices so the original and HF models can live side by side on GPU.
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
53
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]: """simple docstring""" if split_mlp_wi: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __UpperCamelCase = (wi_a, wi_a) else: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __UpperCamelCase = 
params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str: """simple docstring""" return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) __UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , __lowercase ) __UpperCamelCase = collections.OrderedDict() # Shared embeddings. __UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , __lowercase , 'encoder' ).T __UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'encoder' ).T __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 2 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T __UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int: """simple docstring""" __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) __UpperCamelCase = state_dict['shared.weight'] return state_dict def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase ) __UpperCamelCase = convert_tax_to_pytorch( __lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase ) __UpperCamelCase = make_state_dict(__lowercase , __lowercase ) model.load_state_dict(__lowercase , strict=__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]: """simple docstring""" __UpperCamelCase = MTaConfig.from_json_file(__lowercase ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCamelCase = UMTaEncoderModel(__lowercase ) else: __UpperCamelCase = UMTaForConditionalGeneration(__lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__lowercase ) # Verify that we can load the checkpoint. 
model.from_pretrained(__lowercase ) print('Done' ) if __name__ == "__main__": a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) a__ : List[str] =parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
53
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] =MBartConfig SCREAMING_SNAKE_CASE_ : Union[str, Any] ={} SCREAMING_SNAKE_CASE_ : List[str] ="gelu" def __init__( self : Optional[int] , __A : List[Any] , __A : int=1_3 , __A : Optional[Any]=7 , __A : Dict=True , __A : List[Any]=False , __A : Dict=9_9 , __A : Optional[Any]=3_2 , __A : Union[str, Any]=2 , __A : int=4 , __A : List[str]=3_7 , __A : Dict=0.1 , __A : Any=0.1 , __A : str=2_0 , __A : Tuple=2 , __A : Tuple=1 , __A : Optional[int]=0 , ): __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = eos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = bos_token_id def _lowerCamelCase ( self : int ): __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) __UpperCamelCase = 
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCamelCase = prepare_mbart_inputs_dict(__A , __A , __A ) return config, inputs_dict def _lowerCamelCase ( self : int , __A : Dict , __A : Optional[Any] ): __UpperCamelCase = TFMBartModel(config=__A ).get_decoder() __UpperCamelCase = inputs_dict['input_ids'] __UpperCamelCase = input_ids[:1, :] __UpperCamelCase = inputs_dict['attention_mask'][:1, :] __UpperCamelCase = inputs_dict['head_mask'] __UpperCamelCase = 1 # first forward pass __UpperCamelCase = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A ) __UpperCamelCase , __UpperCamelCase = outputs.to_tuple() __UpperCamelCase = past_key_values[1] def lowercase__ ( __lowercase : str , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : List[str]=None , __lowercase : Dict=None , __lowercase : Dict=None , __lowercase : List[Any]=None , ) -> Dict: """simple docstring""" if attention_mask is None: __UpperCamelCase = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: 
__UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple =(TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : str =(TFMBartForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE_ : List[Any] =( { "conversational": TFMBartForConditionalGeneration, "feature-extraction": TFMBartModel, "summarization": TFMBartForConditionalGeneration, "text2text-generation": TFMBartForConditionalGeneration, "translation": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ : Dict =True SCREAMING_SNAKE_CASE_ : Union[str, Any] =False SCREAMING_SNAKE_CASE_ : Optional[int] =False def _lowerCamelCase ( self : Union[str, Any] , __A : Dict , __A : List[Any] , __A : Any , __A : Tuple , __A : Optional[int] ): if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = TFMBartModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__A ) def _lowerCamelCase ( self : Tuple ): self.config_tester.run_common_tests() def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__A ) @require_sentencepiece @require_tokenizers @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] =[ " UN Chief Says There Is No Military Solution in Syria", ] SCREAMING_SNAKE_CASE_ : Tuple =[ "Şeful ONU declară că nu există o soluţie militară în Siria", ] SCREAMING_SNAKE_CASE_ : Tuple ="facebook/mbart-large-en-ro" @cached_property def _lowerCamelCase ( self : Optional[int] ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def _lowerCamelCase ( self : List[Any] , **__A : Dict ): __UpperCamelCase = self.translate_src_text(**__A ) self.assertListEqual(self.expected_text , __A ) def _lowerCamelCase ( self : Dict , **__A : Union[str, Any] ): __UpperCamelCase = self.tokenizer(self.src_text , **__A , return_tensors='tf' ) __UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __UpperCamelCase = self.tokenizer.batch_decode(__A , skip_special_tokens=__A ) return generated_words @slow def _lowerCamelCase ( self : Tuple ): self._assert_generated_batch_equal_expected()
53
"""BLIP processor: wraps a BLIP image processor and a BERT tokenizer."""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case(ProcessorMixin):
    """
    Constructs a BLIP processor which bundles a BLIP image processor and a
    BERT tokenizer into a single callable.

    NOTE(review): the original block inherited from the undefined name
    ``__lowerCamelCase`` and bound three distinct class attributes plus
    several distinct locals to single mangled identifiers, so it could not
    run. This rewrite restores the conventional ``ProcessorMixin`` contract
    (``attributes``, ``image_processor_class``, ``tokenizer_class``) —
    confirm the restored names against the upstream module.
    """

    # Sub-processor attribute names managed by ProcessorMixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP's text encoder does not use token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare image and/or text inputs for the model.

        Raises:
            ValueError: if neither ``images`` nor ``text`` is given.
        """
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Text-only call: behave exactly like the tokenizer.
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # Image (and optionally text) call: add pixel_values.
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        # Merge token ids / masks into the image encoding when both are present.
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving
        # and de-duplicated via dict.fromkeys.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
53
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a__ : Optional[int] =logging.get_logger(__name__) a__ : Any =torch.device('''cpu''') def lowercase__ ( ) -> Tuple: """simple docstring""" __UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __UpperCamelCase = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im def lowercase__ ( __lowercase : int ) -> Tuple: """simple docstring""" if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] ) def lowercase__ ( __lowercase : Optional[Any] , __lowercase : str , __lowercase : Dict ) -> List[str]: """simple docstring""" __UpperCamelCase = dct.pop(__lowercase ) __UpperCamelCase = val def lowercase__ ( __lowercase : Optional[Any] ) -> str: """simple docstring""" __UpperCamelCase = [] for k in state_dict.keys(): __UpperCamelCase = k if ".pwconv" in k: __UpperCamelCase = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: __UpperCamelCase = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: __UpperCamelCase = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: __UpperCamelCase = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: __UpperCamelCase = k_new.split('.' 
) if ls[2].isdigit(): __UpperCamelCase = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] ) else: __UpperCamelCase = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : int ) -> Tuple: """simple docstring""" __UpperCamelCase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __UpperCamelCase = 1000 __UpperCamelCase = 'huggingface/label-files' __UpperCamelCase = 'imagenet-1k-id2label.json' __UpperCamelCase = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) ) __UpperCamelCase = {int(__lowercase ): v for k, v in idalabel.items()} __UpperCamelCase = idalabel __UpperCamelCase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __UpperCamelCase = [3, 3, 6, 4] __UpperCamelCase = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __UpperCamelCase = [3, 3, 9, 6] __UpperCamelCase = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __UpperCamelCase = [4, 3, 10, 5] __UpperCamelCase = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __UpperCamelCase = [4, 4, 12, 6] __UpperCamelCase = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): __UpperCamelCase = torch.hub.load_state_dict_from_url(__lowercase , map_location='cpu' , check_hash=__lowercase ) else: __UpperCamelCase = torch.load(__lowercase , map_location='cpu' ) __UpperCamelCase = checkpoint __UpperCamelCase = create_rename_keys(__lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) # load HuggingFace model __UpperCamelCase = SwiftFormerForImageClassification(__lowercase ).eval() 
hf_model.load_state_dict(__lowercase ) # prepare test inputs __UpperCamelCase = prepare_img() __UpperCamelCase = ViTImageProcessor.from_pretrained('preprocessor_config' ) __UpperCamelCase = processor(images=__lowercase , return_tensors='pt' ) # compare outputs from both models __UpperCamelCase = get_expected_output(__lowercase ) __UpperCamelCase = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , __lowercase , atol=1e-3 ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(__lowercase ) if __name__ == "__main__": a__ : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') a__ : Union[str, Any] =parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
53
'''simple docstring''' from __future__ import annotations from typing import Any class snake_case ( __lowerCamelCase ): """simple docstring""" pass class snake_case : """simple docstring""" def __init__( self : List[Any] , __A : Any ): __UpperCamelCase = data __UpperCamelCase = None def __iter__( self : Optional[Any] ): __UpperCamelCase = self __UpperCamelCase = [] while node: if node in visited: raise ContainsLoopError visited.append(__A ) yield node.data __UpperCamelCase = node.next_node @property def _lowerCamelCase ( self : List[str] ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": a__ : Dict =Node(1) a__ : Optional[int] =Node(2) a__ : List[str] =Node(3) a__ : Optional[int] =Node(4) print(root_node.has_loop) # False a__ : str =root_node.next_node print(root_node.has_loop) # True a__ : Optional[int] =Node(5) a__ : List[Any] =Node(6) a__ : int =Node(5) a__ : Tuple =Node(6) print(root_node.has_loop) # False a__ : str =Node(1) print(root_node.has_loop) # False
53
1
"""BLIP processor: wraps a BLIP image processor and a BERT tokenizer."""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case(ProcessorMixin):
    """
    Constructs a BLIP processor which bundles a BLIP image processor and a
    BERT tokenizer into a single callable.

    NOTE(review): the original block inherited from the undefined name
    ``__lowerCamelCase`` and bound three distinct class attributes plus
    several distinct locals to single mangled identifiers, so it could not
    run. This rewrite restores the conventional ``ProcessorMixin`` contract
    (``attributes``, ``image_processor_class``, ``tokenizer_class``) —
    confirm the restored names against the upstream module.
    """

    # Sub-processor attribute names managed by ProcessorMixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        # BLIP's text encoder does not use token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare image and/or text inputs for the model.

        Raises:
            ValueError: if neither ``images`` nor ``text`` is given.
        """
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Text-only call: behave exactly like the tokenizer.
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # Image (and optionally text) call: add pixel_values.
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        # Merge token ids / masks into the image encoding when both are present.
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving
        # and de-duplicated via dict.fromkeys.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
53
"""Rabin-Karp substring search using a rolling polynomial hash."""

alphabet_size = 256  # base of the polynomial hash (one byte of code space)
modulus = 1_000_003  # large prime keeping hash values bounded


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True iff ``pattern`` occurs as a substring of ``text``.

    Uses a rolling hash so each window of ``text`` is hashed in O(1) after
    the first; a direct string comparison confirms every hash match, so
    collisions cannot produce false positives. Overall O(len(text)) expected.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Hash the pattern and the first window of the text.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            # Highest power needed is alphabet_size**(p_len - 1); skip the
            # final multiply.
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        # Confirm hash matches with a real comparison (collision guard).
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            # Last window: nothing left to roll in.
            continue
        # Rolling hash update: drop text[i], shift, append text[i + p_len].
        # See https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Self-test mirroring the module's original example cases."""
    # Test 1)
    pattern = 'abc1abc12'
    text_with_match = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text_without_match = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text_with_match) and not rabin_karp(pattern, text_without_match)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII code points hash correctly via ord().
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')


if __name__ == "__main__":
    test_rabin_karp()
53
1
'''simple docstring''' from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function a__ : Any =1.054571817E-34 # unit of ℏ : J * s a__ : List[Any] =3E8 # unit of c : m * s^-1 def lowercase__ ( __lowercase : float , __lowercase : float , __lowercase : float ) -> dict[str, float]: """simple docstring""" if (force, area, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if force < 0: raise ValueError('Magnitude of force can not be negative' ) if distance < 0: raise ValueError('Distance can not be negative' ) if area < 0: raise ValueError('Area can not be negative' ) if force == 0: __UpperCamelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: __UpperCamelCase = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __UpperCamelCase = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('One and only one argument must be 0' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
53
'''simple docstring''' from __future__ import annotations class snake_case : """simple docstring""" def __init__( self : Optional[int] , __A : list[list[int]] ): __UpperCamelCase = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(__A ) != 0: __UpperCamelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(__A ) != cols: raise error for value in row: if not isinstance(__A , (int, float) ): raise error __UpperCamelCase = rows else: __UpperCamelCase = [] def _lowerCamelCase ( self : int ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowerCamelCase ( self : str ): return len(self.rows ) @property def _lowerCamelCase ( self : Any ): return len(self.rows[0] ) @property def _lowerCamelCase ( self : Optional[Any] ): return (self.num_rows, self.num_columns) @property def _lowerCamelCase ( self : Dict ): return self.order[0] == self.order[1] def _lowerCamelCase ( self : Any ): __UpperCamelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Any ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowerCamelCase ( self : List[str] ): return bool(self.determinant() ) def _lowerCamelCase ( self : Dict , __A : int , __A : int ): __UpperCamelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(__A ).determinant() def 
_lowerCamelCase ( self : Dict , __A : int , __A : int ): if (row + column) % 2 == 0: return self.get_minor(__A , __A ) return -1 * self.get_minor(__A , __A ) def _lowerCamelCase ( self : List[str] ): return Matrix( [ [self.get_minor(__A , __A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowerCamelCase ( self : Union[str, Any] ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self : Optional[Any] ): return str(self.rows ) def __str__( self : Union[str, Any] ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(__A ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def _lowerCamelCase ( self : List[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in row: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(__A ) else: __UpperCamelCase = self.rows[0:position] + [row] + self.rows[position:] def _lowerCamelCase ( self : Optional[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in column: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: __UpperCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __UpperCamelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : Tuple , __A : object ): if not isinstance(__A , __A ): return NotImplemented return self.rows == other.rows def __ne__( self : Any , __A : object ): return not self == other def __neg__( self : List[Any] ): return self * -1 def __add__( self : List[str] , __A : Matrix ): if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : str , __A : Matrix ): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] 
for i in range(self.num_rows ) ] ) def __mul__( self : str , __A : Matrix | int | float ): if isinstance(__A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(__A , __A ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(__A , __A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self : Union[str, Any] , __A : int ): if not isinstance(__A , __A ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) __UpperCamelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowerCamelCase ( cls : Tuple , __A : list[int] , __A : list[int] ): return sum(row[i] * column[i] for i in range(len(__A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
53
1
"""`transformers-cli train` sub-command: train a pipeline on a CSV dataset.

Reconstructed from identifier-mangled source: attribute names are restored
from their later references (self.logger, self.pipeline, self.train_dataset,
self.validation_split, ...).
"""
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory used by the sub-parser's `func` default to build the command."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `train` sub-parser and all of its CLI arguments."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        # TF takes priority when both frameworks are installed.
        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training loop."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        # PyTorch training via this CLI was never implemented upstream.
        raise NotImplementedError

    def run_tf(self):
        """Fit the TF pipeline on the loaded dataset(s) and save the result."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
53
"""Deduplicate identical initializer tensors in an ONNX model.

Equal initializers are detected pairwise, duplicates removed, and every node
input rewired to the surviving copy. Reconstructed from identifier-mangled
source; names restored from their later references.
"""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Return True if two TensorProtos are equal ignoring their names.

    Temporarily blanks both names so proto equality compares only the payload,
    then restores them.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called `name` on a node with `new_name`, recursing
    into If/Loop subgraphs."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # repeated proto fields have no item assignment: insert then pop.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply `_node_replace_input_with` to every node of a graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers (index pairs `(i, ref_i)` with i > ref_i)
    from `model_without_ext` and rewire inputs to the kept tensor."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Load `onnx_file_path`, remove duplicate initializers, save and return
    the path of the optimized model (`optimized_<name>` in the same folder)."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # Byte sizes per TensorProto.DataType code:
                # 1 = FLOAT, 6 = INT32 (4 bytes); 7 = INT64, 11 = DOUBLE (8 bytes).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
53
1
"""Speech2Text feature extractor: Kaldi-compliant fbank features with optional
utterance-level cepstral mean/variance normalization (CMVN).

Reconstructed from identifier-mangled source; names restored from their later
references and the keyword arguments visible in the calls.
"""
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # attention_mask is always needed downstream to know true lengths for CMVN.
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute log-mel filterbank features for one mono waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Cepstral mean/variance-normalize `x` using only its first
        `input_length` frames; re-fill the padded tail with `padding_value`."""
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply utterance CMVN to each feature matrix, using the attention
        mask (when given) to find the true, unpadded length."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into padded,
        optionally CMVN-normalized fbank features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
53
'''simple docstring''' import random def lowercase__ ( __lowercase : list , __lowercase : Optional[Any] ) -> tuple: """simple docstring""" __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = [], [], [] for element in data: if element < pivot: less.append(__lowercase ) elif element > pivot: greater.append(__lowercase ) else: equal.append(__lowercase ) return less, equal, greater def lowercase__ ( __lowercase : list , __lowercase : int ) -> Dict: """simple docstring""" if index >= len(__lowercase ) or index < 0: return None __UpperCamelCase = items[random.randint(0 , len(__lowercase ) - 1 )] __UpperCamelCase = 0 __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = _partition(__lowercase , __lowercase ) __UpperCamelCase = len(__lowercase ) __UpperCamelCase = len(__lowercase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__lowercase , __lowercase ) # must be in larger else: return quick_select(__lowercase , index - (m + count) )
53
1
"""Tests for the TensorFlow RegNet models.

Reconstructed from identifier-mangled source: tester attributes, method names
and local variables are restored from their later references
(self.model_tester, self.num_stages, config.layer_type, ...).
"""
from __future__ import annotations

import inspect
import unittest
from typing import List, Tuple

from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFRegNetModelTester:
    """Builds small RegNet configs/inputs and runs shape checks for the tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in depths
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the fixture cats image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
53
"""Convert a fairseq mBART checkpoint into a Hugging Face MBartForConditionalGeneration.

Reconstructed from identifier-mangled source: names restored from the keyword
arguments used at the call site (`hf_config_path=`, `finetuned=`, `mbart_50=`).
"""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """In-place removal of fairseq bookkeeping keys that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weight with `emb`
    (used as the tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq checkpoint from `checkpoint_path` and return an
    equivalent MBartForConditionalGeneration.

    `finetuned` ties the LM head to the shared embeddings; `mbart_50` together
    with `finetuned` switches the activation to relu (mBART-50 fine-tunes).
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
53
1
'''simple docstring''' import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process a__ : Any =logging.getLogger(__name__) a__ : Optional[int] =list(MODEL_WITH_LM_HEAD_MAPPING.keys()) a__ : Dict =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." 
) } , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__lowerCamelCase )} , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "The input training data file (a text file)."} ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={ "help": ( "The input training data files (multiple files in glob format). 
" "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) SCREAMING_SNAKE_CASE_ : bool =field( default=__lowerCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) SCREAMING_SNAKE_CASE_ : bool =field( default=__lowerCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) SCREAMING_SNAKE_CASE_ : bool =field(default=__lowerCamelCase , metadata={"help": "Whether ot not to use whole word mask."} ) SCREAMING_SNAKE_CASE_ : float =field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) SCREAMING_SNAKE_CASE_ : float =field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) } , ) SCREAMING_SNAKE_CASE_ : int =field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) SCREAMING_SNAKE_CASE_ : int =field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) } , ) SCREAMING_SNAKE_CASE_ : bool =field( default=__lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowercase__ ( __lowercase : DataTrainingArguments , __lowercase : PreTrainedTokenizer , __lowercase : bool = False , __lowercase : Optional[str] = None , ) -> Any: """simple docstring""" def _dataset(__lowercase : Tuple , __lowercase : int=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' ) return LineByLineWithRefDataset( tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , ref_path=__lowercase , ) return LineByLineTextDataset(tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size ) else: return TextDataset( tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__lowercase , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(__lowercase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , __lowercase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: __UpperCamelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __UpperCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: __UpperCamelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.tokenizer_name: __UpperCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __UpperCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another' ' script, save it,and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: __UpperCamelCase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , ) else: logger.info('Training new model from scratch' ) __UpperCamelCase = AutoModelWithLMHead.from_config(__lowercase ) model.resize_token_embeddings(len(__lowercase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' '--mlm flag (masked language modeling).' ) if data_args.block_size <= 0: __UpperCamelCase = tokenizer.max_len # Our input block size will be the max possible for the model else: __UpperCamelCase = min(data_args.block_size , tokenizer.max_len ) # Get datasets __UpperCamelCase = ( get_dataset(__lowercase , tokenizer=__lowercase , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) __UpperCamelCase = ( get_dataset(__lowercase , tokenizer=__lowercase , evaluate=__lowercase , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": __UpperCamelCase = DataCollatorForPermutationLanguageModeling( tokenizer=__lowercase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: __UpperCamelCase = DataCollatorForWholeWordMask( tokenizer=__lowercase , mlm_probability=data_args.mlm_probability ) else: __UpperCamelCase = DataCollatorForLanguageModeling( tokenizer=__lowercase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __UpperCamelCase = Trainer( model=__lowercase , args=__lowercase , data_collator=__lowercase , train_dataset=__lowercase , 
eval_dataset=__lowercase , prediction_loss_only=__lowercase , ) # Training if training_args.do_train: __UpperCamelCase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=__lowercase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCamelCase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __UpperCamelCase = trainer.evaluate() __UpperCamelCase = math.exp(eval_output['eval_loss'] ) __UpperCamelCase = {'perplexity': perplexity} __UpperCamelCase = os.path.join(training_args.output_dir , 'eval_results_lm.txt' ) if trainer.is_world_master(): with open(__lowercase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , __lowercase , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(__lowercase ) return results def lowercase__ ( __lowercase : Tuple ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
53
"""Evaluate a causal language model checkpoint on a streamed dataset with Accelerate.

Fixes the name-mangled original: ``__init__`` never stored its arguments on
``self`` (so ``__iter__`` would crash), and both helper functions were defined
under one placeholder name while being called as ``create_dataloader`` /
``evaluate``.
"""
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that concatenates tokenized texts into fixed-length chunks.

    Documents are joined with the tokenizer's BOS token; only full
    ``seq_length`` chunks are yielded (the ragged tail is dropped).
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        # BOS is used as the separator between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Approximate number of characters to buffer before each tokenizer call.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # Fill a character buffer large enough to tokenize in one batch.
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Emit only full-length sequences.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Build the evaluation dataloader from a streamed Hugging Face dataset."""
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Return ``(mean eval loss, perplexity)`` over the evaluation dataloader.

    Uses the module-level ``model``, ``eval_dataloader`` and ``accelerator``.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat so that gather() across processes keeps per-sample weighting.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
53
1
"""CUAD metric: wraps the official scoring script for CUAD v1 for `datasets`."""
import datasets

from .evaluate import evaluate


_CITATION = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""

_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly match the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """CUAD metric.

    The mangled original defined both overrides as ``_lowerCamelCase`` (the
    second shadowed the first), breaking the ``datasets.Metric`` contract;
    the required ``_info`` / ``_compute`` names are restored.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        # Map prediction ids to their candidate answer texts.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-shape the references into the CUAD dataset layout the official
        # `evaluate` script expects.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
53
"""GPT Neo model configuration, plus helpers and the ONNX export config."""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    """Configuration class for GPT Neo models.

    The mangled original gave both classes in this module the same name (the
    second shadowed the first) and renamed the staticmethod that
    ``self.expand_attention_types_params`` looks up; canonical names restored.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        # Expand e.g. [[["global", "local"], 12]] into a per-layer list.
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Repeat each attention-type group ``item[1]`` times into a flat list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # Move the window dimension to the end, mirroring Tensor.unfold's layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Return the largest divisor of ``seq_length`` below ``window_size`` and
    the resulting number of blocks (ONNX-exportable formulation)."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
53
1
"""Tests for the PyTorch Audio Spectrogram Transformer (AST) model."""
import inspect
import unittest

from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    """Builds small AST configs/inputs for the common model tests.

    The mangled original assigned every constructor argument to one throwaway
    local instead of ``self``; the attribute stores are restored (names taken
    from the ``self.*`` reads in the methods below).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common tests for AST; method names restored to `test_*` so that
    unittest discovery actually runs them (the mangled original defined
    several methods under one shared non-test name)."""

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    """Download one sample FLAC file and load it with torchaudio."""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
53
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    """Donut-based document question answering tool.

    Restores the ``encode`` / ``forward`` / ``decode`` hook names that
    ``PipelineTool`` dispatches on, and fixes ``tokenajson`` (a typo) to the
    processor's ``token2json``.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        # Donut DocVQA prompt template; {user_input} is replaced by the question.
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # Fix: the processor method is `token2json`, not `tokenajson`.
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
53
1
"""Tests for `calculate_rouge` / `calculate_rouge_path`.

Restores the fixture names (`PRED`, `TGT` — both were mangled to one shadowed
module name) and gives each test a unique `test_*` name so pytest can discover
them (the original defined every function under one shared placeholder name,
with undefined placeholder arguments at the call sites).
"""
from collections import defaultdict
from pathlib import Path

import pandas as pd

from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


PRED = [
    'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
    ' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

TGT = [
    'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
    ' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]


def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    # rougeLsum is sentence-split aware, so newline separation must help here.
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
53
'''simple docstring'''
# Conditional public API for the Versatile Diffusion pipelines: the real
# implementations are exported only when both `transformers>=4.25.0` and
# `torch` are installed; otherwise dummy placeholder objects (which raise a
# helpful error when used) are exported under the same names.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Require torch + a recent-enough transformers for the real pipelines.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing or too-old dependencies: fall back to dummy objects.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Dependencies satisfied: export the actual model and pipeline classes.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
1
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a__ : List[str] =logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''') a__ : Union[str, Any] =list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) a__ : Optional[Any] =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def lowercase__ ( __lowercase : str ) -> int: """simple docstring""" with open(__lowercase , 'rb' ) as f: __UpperCamelCase = Image.open(__lowercase ) return im.convert('RGB' ) @dataclass class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." 
} , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=__lowerCamelCase , metadata={"help": "A folder containing the training data."} ) SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=__lowerCamelCase , metadata={"help": "A folder containing the validation data."} ) SCREAMING_SNAKE_CASE_ : Optional[float] =field( default=0.15 , metadata={"help": "Percent to split off of train for validation."} ) SCREAMING_SNAKE_CASE_ : Optional[int] =field( default=__lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] =field( default=__lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def _lowerCamelCase ( self : Tuple ): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( 'You must specify either a dataset name from the hub or a train and/or validation directory.' 
) @dataclass class snake_case : """simple docstring""" SCREAMING_SNAKE_CASE_ : str =field( default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__lowerCamelCase )} , ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ : Optional[str] =field( default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) SCREAMING_SNAKE_CASE_ : str =field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) SCREAMING_SNAKE_CASE_ : str =field(default=__lowerCamelCase , metadata={"help": "Name or path of preprocessor config."} ) SCREAMING_SNAKE_CASE_ : bool =field( default=__lowerCamelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) } , ) SCREAMING_SNAKE_CASE_ : bool =field( default=__lowerCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def lowercase__ ( __lowercase : Any ) -> str: """simple docstring""" __UpperCamelCase = torch.stack([example['pixel_values'] for example in examples] ) __UpperCamelCase = torch.tensor([example['labels'] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def lowercase__ ( ) -> Optional[int]: """simple docstring""" __UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_image_classification' , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __UpperCamelCase = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __UpperCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __UpperCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: __UpperCamelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , ) else: __UpperCamelCase = {} if data_args.train_dir is not None: __UpperCamelCase = os.path.join(data_args.train_dir , '**' ) if data_args.validation_dir is not None: __UpperCamelCase = os.path.join(data_args.validation_dir , '**' ) __UpperCamelCase = load_dataset( 'imagefolder' , data_files=__lowercase , cache_dir=model_args.cache_dir , task='image-classification' , ) # If we don't have a validation split, split off a percentage of train as validation. __UpperCamelCase = None if 'validation' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: __UpperCamelCase = dataset['train'].train_test_split(data_args.train_val_split ) __UpperCamelCase = split['train'] __UpperCamelCase = split['test'] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. __UpperCamelCase = dataset['train'].features['labels'].names __UpperCamelCase , __UpperCamelCase = {}, {} for i, label in enumerate(__lowercase ): __UpperCamelCase = str(__lowercase ) __UpperCamelCase = label # Load the accuracy metric from the datasets package __UpperCamelCase = evaluate.load('accuracy' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(__lowercase : Optional[int] ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) __UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __UpperCamelCase = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) __UpperCamelCase = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: __UpperCamelCase = image_processor.size['shortest_edge'] else: __UpperCamelCase = (image_processor.size['height'], image_processor.size['width']) __UpperCamelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) __UpperCamelCase = Compose( [ RandomResizedCrop(__lowercase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) __UpperCamelCase = Compose( [ Resize(__lowercase ), CenterCrop(__lowercase ), ToTensor(), normalize, ] ) def train_transforms(__lowercase : List[str] ): __UpperCamelCase = [ _train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image'] ] return example_batch def val_transforms(__lowercase : Union[str, Any] ): __UpperCamelCase = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: __UpperCamelCase = ( dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: __UpperCamelCase = ( dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__lowercase ) # Initalize our trainer __UpperCamelCase = Trainer( model=__lowercase , args=__lowercase , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: __UpperCamelCase = None if training_args.resume_from_checkpoint is not None: 
__UpperCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __UpperCamelCase = last_checkpoint __UpperCamelCase = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __UpperCamelCase = trainer.evaluate() trainer.log_metrics('eval' , __lowercase ) trainer.save_metrics('eval' , __lowercase ) # Write model card and (optionally) push to hub __UpperCamelCase = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'image-classification', 'dataset': data_args.dataset_name, 'tags': ['image-classification', 'vision'], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
53
'''simple docstring'''
# Parquet reader/writer glue for the `datasets` library.
# NOTE(review): identifiers in this module look machine-mangled — every
# assignment target is `__UpperCamelCase`, parameters are `__lowercase`/`__A`,
# and both classes are named `snake_case` — so later reads such as
# `batch_size`, `feature`, `dataset`, `writer` and `written` refer to names
# that are never bound here. Comments below describe the apparent intent;
# the code itself is left byte-identical.
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def lowercase__ ( __lowercase : Features ) -> Optional[int]:
    """Pick a Parquet writer batch (row-group) size from a dataset's features.

    Large-celled feature types (images, audio, raw binary) get a smaller
    row-group size from `config`; returns None when no such feature exists
    so the writer default applies.
    """
    __UpperCamelCase = np.inf

    def set_batch_size(__lowercase : FeatureType ) -> None:
        # Shrink the batch size whenever a large-valued feature type is seen.
        nonlocal batch_size
        if isinstance(__lowercase , __lowercase ):
            __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(__lowercase , __lowercase ):
            __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary":
            __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    # Walk every (possibly nested) feature and apply the reduction above.
    _visit(__lowercase , __lowercase )
    return None if batch_size is np.inf else batch_size


class snake_case ( __lowerCamelCase ):
    """Reader that builds a Dataset (or streaming IterableDataset) from Parquet files."""

    def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ):
        super().__init__(
            __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
        # Normalize a bare path (or list of paths) into a {split: paths} mapping.
        __UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths}
        # [1] is the packaged module's hash; presumably used for cache keys — TODO confirm.
        __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1]
        __UpperCamelCase = Parquet(
            cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , )

    def _lowerCamelCase ( self : Optional[int] ):
        # Build iterable dataset
        if self.streaming:
            __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            __UpperCamelCase = None
            __UpperCamelCase = None
            __UpperCamelCase = None
            __UpperCamelCase = None
            self.builder.download_and_prepare(
                download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
            __UpperCamelCase = self.builder.as_dataset(
                split=self.split , verification_mode=__A , in_memory=self.keep_in_memory )
        return dataset


class snake_case :
    """Writer that serializes a Dataset to a Parquet file or open binary buffer."""

    def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ):
        __UpperCamelCase = dataset
        __UpperCamelCase = path_or_buf
        # Fall back to a feature-dependent batch size when none is given.
        __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features )
        __UpperCamelCase = parquet_writer_kwargs

    def _lowerCamelCase ( self : Optional[int] ):
        # Resolve the effective batch size, then dispatch on destination kind.
        __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            # Path-like destination: open (and truncate) the file ourselves.
            with open(self.path_or_buf , 'wb+' ) as buffer:
                __UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs )
        else:
            # Destination is already an open binary buffer.
            __UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs )
        return written

    def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ):
        # Stream the Arrow table to Parquet in `batch_size`-row slices,
        # returning the total number of bytes written.
        __UpperCamelCase = 0
        # Drop a stray 'path_or_buf' kwarg so it is not forwarded to pyarrow.
        __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A )
        __UpperCamelCase = self.dataset.features.arrow_schema
        __UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , __A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            # `query_table` honors any index mapping (e.g. after shuffle/select).
            __UpperCamelCase = query_table(
                table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(__A )
            written += batch.nbytes
        writer.close()
        return written
53
1
'''Equated monthly installment (EMI) for a fixed-rate amortized loan.'''


def lowercase__ ( principal : float , rate_per_annum : float , years_to_repay : int ) -> float:
    """Return the equated monthly installment (EMI) for a loan.

    Uses the standard amortization formula
        EMI = P * r * (1 + r)**n / ((1 + r)**n - 1)
    where ``r`` is the monthly rate and ``n`` the number of monthly payments.

    :param principal: amount borrowed; must be > 0
    :param rate_per_annum: yearly interest rate as a fraction (e.g. 0.12); must be >= 0
    :param years_to_repay: repayment period in whole years; must be an int > 0
    :raises ValueError: if any argument is out of range
    """
    # Fixes over the original: the signature declared three duplicate
    # `__lowercase` parameters (a SyntaxError) and locals were all assigned to
    # `__UpperCamelCase` while the body read `rate_per_month` /
    # `number_of_payments` (a NameError); names are restored to what the body
    # expects. `ValueError` replaces bare `Exception` (still caught by any
    # `except Exception` caller).
    if principal <= 0:
        raise ValueError('Principal borrowed must be > 0' )
    if rate_per_annum < 0:
        raise ValueError('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise ValueError('Years to repay must be an integer > 0' )

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    # A zero rate (allowed by the >= 0 check) makes the closed-form
    # denominator zero; the installment is then the principal split evenly.
    if rate_per_month == 0:
        return principal / number_of_payments

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
'''simple docstring'''
# Tests for SplitDict/SplitInfo YAML round-tripping and dict serialization.
# NOTE(review): identifiers look machine-mangled — both tests are named
# `lowercase__`, assignments target `__UpperCamelCase`, and reads such as
# `split_dict`, `reloaded` and `split_info` are never bound — so these tests
# cannot pass as written. Comments describe the apparent intent only; the
# code is left byte-identical.
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    'split_dict' ,
    [
        SplitDict(),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
        SplitDict({'train': SplitInfo()} ),
    ] ,
)
def lowercase__ ( __lowercase : SplitDict ) -> int:
    """Round-trip a SplitDict through its YAML-list form and check equality."""
    __UpperCamelCase = split_dict._to_yaml_list()
    assert len(__lowercase ) == len(__lowercase )
    __UpperCamelCase = SplitDict._from_yaml_list(__lowercase )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        __UpperCamelCase = None
        # the split name of split_dict takes over the name of the split info object
        __UpperCamelCase = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=__lowercase ), SplitInfo(dataset_name='my_dataset' )] )
def lowercase__ ( __lowercase : Dict ) -> Any:
    """Check that asdict() still emits the deprecated dataset_name field."""
    __UpperCamelCase = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
1
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class snake_case ( datasets.BeamBasedBuilder ): """simple docstring""" def _lowerCamelCase ( self : Optional[Any] ): return datasets.DatasetInfo( features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=__A , ) def _lowerCamelCase ( self : List[str] , __A : int , __A : Any ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )] def _lowerCamelCase ( self : Union[str, Any] , __A : List[str] , __A : Optional[Any] ): import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(__A ) class snake_case ( datasets.BeamBasedBuilder ): """simple docstring""" def _lowerCamelCase ( self : List[str] ): return datasets.DatasetInfo( features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=__A , ) def _lowerCamelCase ( self : Tuple , __A : List[Any] , __A : Any ): return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} ) ] def _lowerCamelCase ( self : int , __A : Tuple , __A : Tuple ): import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(__A ) def lowercase__ ( ) -> List[str]: """simple docstring""" return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )] def lowercase__ ( ) -> str: """simple docstring""" return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )] class snake_case ( __lowerCamelCase ): """simple docstring""" @require_beam def _lowerCamelCase ( self : Dict ): __UpperCamelCase = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCamelCase = DummyBeamDataset(cache_dir=__A , beam_runner='DirectRunner' ) 
builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(__A , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) __UpperCamelCase = builder.as_dataset() self.assertEqual(dset['train'].num_rows , __A ) self.assertEqual(dset['train'].info.splits['train'].num_examples , __A ) self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(__A , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _lowerCamelCase ( self : str ): import apache_beam as beam __UpperCamelCase = beam.io.parquetio.WriteToParquet __UpperCamelCase = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCamelCase = DummyBeamDataset(cache_dir=__A , beam_runner='DirectRunner' ) with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock: __UpperCamelCase = partial(__A , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( __A , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) ) self.assertTrue( os.path.exists( os.path.join( __A , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) __UpperCamelCase = builder.as_dataset() self.assertEqual(dset['train'].num_rows , __A ) self.assertEqual(dset['train'].info.splits['train'].num_examples , __A ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) ) self.assertTrue( 
os.path.exists(os.path.join(__A , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def _lowerCamelCase ( self : Dict ): with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCamelCase = DummyBeamDataset(cache_dir=__A ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def _lowerCamelCase ( self : int ): __UpperCamelCase = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: __UpperCamelCase = NestedBeamDataset(cache_dir=__A , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(__A , builder.name , 'default' , '0.0.0' , f'''{builder.name}-train.arrow''' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) ) __UpperCamelCase = builder.as_dataset() self.assertEqual(dset['train'].num_rows , __A ) self.assertEqual(dset['train'].info.splits['train'].num_examples , __A ) self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(__A , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset
53
'''simple docstring'''
# Lazy-import module definition for the BigBird-Pegasus model family.
# NOTE(review): names look machine-mangled — the import structure is assigned
# to `a__` (three separate assignments) while `_LazyModule` is passed
# `_import_structure`, which is never bound here. Comments describe the
# apparent intent only; the code is left byte-identical.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public names it provides (configuration side).
a__ : List[str] ={
    '''configuration_bigbird_pegasus''': [
        '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BigBirdPegasusConfig''',
        '''BigBirdPegasusOnnxConfig''',
    ],
}

# Modeling classes are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Any =[
        '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BigBirdPegasusForCausalLM''',
        '''BigBirdPegasusForConditionalGeneration''',
        '''BigBirdPegasusForQuestionAnswering''',
        '''BigBirdPegasusForSequenceClassification''',
        '''BigBirdPegasusModel''',
        '''BigBirdPegasusPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    # ... while at runtime the module object is replaced by a lazy loader.
    import sys

    a__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
"""`transformers-cli add-new-model`: scaffold a new model from the cookiecutter template.

NOTE(review): this file has been machine-mangled. Several defects stem from the
renaming, not the original logic:
  * duplicate ``__A`` parameters in ``__init__``/``replace`` are SyntaxErrors;
  * ``lowercase__`` reads ``args.*`` but its parameter is named ``__lowercase``;
  * the static parser-registration method reads ``parser`` (its param is ``__A``),
    and both it and the run method share the name ``_lowerCamelCase`` (the later
    definition shadows the earlier one);
  * ``_has_cookiecutter`` is referenced but the try/except assigns ``a__``;
  * the base class ``__lowerCamelCase`` is undefined (was BaseTransformersCLICommand).
Code below is token-identical to the source; only formatting and comments changed.
"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


# Feature-detect cookiecutter; the command refuses to run without it.
try:
    from cookiecutter.main import cookiecutter

    a__ : str = True
except ImportError:
    a__ : Tuple = False

a__ : int = logging.get_logger(__name__)  # pylint: disable=invalid-name


def lowercase__(__lowercase: Namespace) -> List[str]:
    """Factory used by argparse ``set_defaults(func=...)`` to build the command."""
    # NOTE(review): ``args`` is not defined here — should be ``__lowercase``.
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class snake_case(__lowerCamelCase):
    """CLI command that instantiates the adding_a_new_model cookiecutter template."""

    @staticmethod
    def _lowerCamelCase(__A: ArgumentParser):
        # Register the ``add-new-model`` sub-command and its flags.
        __UpperCamelCase = parser.add_parser('add-new-model')
        add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.')
        add_new_model_parser.add_argument('--testing_file', type=__A, help='Configuration file on which to run.')
        add_new_model_parser.add_argument(
            '--path', type=__A, help='Path to cookiecutter. Should only be used for testing purposes.'
        )
        add_new_model_parser.set_defaults(func=__A)

    def __init__(self: Dict, __A: bool, __A: str, __A: Any=None, *__A: Optional[int]):
        # NOTE(review): duplicate ``__A`` parameter names — SyntaxError as written.
        __UpperCamelCase = testing
        __UpperCamelCase = testing_file
        __UpperCamelCase = path

    def _lowerCamelCase(self: str):
        # Entry point (originally ``run``): generate the template, then distribute
        # the produced files into src/, tests/ and docs/ of the transformers repo.
        warnings.warn(
            'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
            'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
            'checks, you should use `transformers-cli add-new-model-like` instead.'
        )
        if not _has_cookiecutter:
            raise ImportError(
                'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
                'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n'
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        __UpperCamelCase = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:2_2]]
        if len(__A) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.'
            )
        # Locate the repo root relative to this file (or to the user-supplied --path).
        __UpperCamelCase = (
            Path(__A).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        __UpperCamelCase = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(__A))
        else:
            # Testing mode: feed answers from the JSON config instead of prompting.
            with open(self._testing_file, 'r') as configuration_file:
                __UpperCamelCase = json.load(__A)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=__A,
                extra_context=__A,
            )
        __UpperCamelCase = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:2_2]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json', 'r') as configuration_file:
            __UpperCamelCase = json.load(__A)
        __UpperCamelCase = configuration['lowercase_modelname']
        __UpperCamelCase = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f'''{directory}/configuration.json''')
        # Which frameworks were requested for the new model.
        __UpperCamelCase = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        __UpperCamelCase = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        __UpperCamelCase = 'Flax' in generate_tensorflow_pytorch_and_flax
        __UpperCamelCase = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
        os.makedirs(__A, exist_ok=__A)
        os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''', exist_ok=__A)
        # Tests require submodules as they have parent imports
        with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''', 'w'):
            pass
        shutil.move(
            f'''{directory}/__init__.py''',
            f'''{model_dir}/__init__.py''',
        )
        shutil.move(
            f'''{directory}/configuration_{lowercase_model_name}.py''',
            f'''{model_dir}/configuration_{lowercase_model_name}.py''',
        )

        def remove_copy_lines(__A: Any):
            # Strip "# Copied from transformers." markers from a generated file in place.
            with open(__A, 'r') as f:
                __UpperCamelCase = f.readlines()
            with open(__A, 'w') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(__A)

        # For each framework: either install the generated modeling + test files,
        # or delete them when that framework was not requested.
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''')
            shutil.move(
                f'''{directory}/modeling_{lowercase_model_name}.py''',
                f'''{model_dir}/modeling_{lowercase_model_name}.py''',
            )
            shutil.move(
                f'''{directory}/test_modeling_{lowercase_model_name}.py''',
                f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''',
            )
        else:
            os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''')
            os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''')
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
            shutil.move(
                f'''{directory}/modeling_tf_{lowercase_model_name}.py''',
                f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''',
            )
            shutil.move(
                f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''',
                f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''',
            )
        else:
            os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
            os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''')
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
            shutil.move(
                f'''{directory}/modeling_flax_{lowercase_model_name}.py''',
                f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''',
            )
            shutil.move(
                f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''',
                f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''',
            )
        else:
            os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
            os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''')
        # Documentation and tokenizer files always move.
        shutil.move(
            f'''{directory}/{lowercase_model_name}.md''',
            f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''',
        )
        shutil.move(
            f'''{directory}/tokenization_{lowercase_model_name}.py''',
            f'''{model_dir}/tokenization_{lowercase_model_name}.py''',
        )
        shutil.move(
            f'''{directory}/tokenization_fast_{lowercase_model_name}.py''',
            f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''',
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(__A: str, __A: str, __A: List[str]):
            # Insert ``lines_to_copy`` into a file directly below the anchor line,
            # atomically via a temp file. NOTE(review): duplicate ``__A`` params.
            # Create temp file
            __UpperCamelCase, __UpperCamelCase = mkstemp()
            __UpperCamelCase = False
            with fdopen(__A, 'w') as new_file:
                with open(__A) as old_file:
                    for line in old_file:
                        new_file.write(__A)
                        if line_to_copy_below in line:
                            __UpperCamelCase = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(__A)
            if not line_found:
                raise ValueError(f'''Line {line_to_copy_below} was not found in file.''')
            # Copy the file permissions from the old file to the new file
            copymode(__A, __A)
            # Remove original file
            remove(__A)
            # Move new file
            move(__A, __A)

        def skip_units(__A: Dict):
            # True when the snippet targets a framework the user did not request.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(__A: Optional[Any]):
            # Parse the generated ``to_replace_*`` datafile: directive comments name
            # the target file and anchor line; plain lines are the snippet to splice.
            with open(__A) as datafile:
                __UpperCamelCase = []
                __UpperCamelCase = False
                __UpperCamelCase = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        __UpperCamelCase = line.split('"')[1]
                        __UpperCamelCase = skip_units(__A)
                    elif "# Below: " in line and "##" not in line:
                        __UpperCamelCase = line.split('"')[1]
                        __UpperCamelCase = skip_units(__A)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(__A, __A, __A)
                        __UpperCamelCase = []
                    elif "# Replace with" in line and "##" not in line:
                        __UpperCamelCase = []
                    elif "##" not in line:
                        lines_to_copy.append(__A)
            remove(__A)

        replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''')
        os.rmdir(__A)
53
"""Speech feature extractor: Kaldi-compliant log-mel filter banks + utterance CMVN.

NOTE(review): machine-mangled source. ``__init__``/the static normalizer declare
duplicate ``__A`` parameters (SyntaxError as written); bodies reference the
original local names (``num_mel_bins``, ``waveform``, ``x``, ...) and the original
method names (``self._extract_fbank_features``, ``self.utterance_cmvn``,
``self.normalize``) which the mangled ``_lowerCamelCase`` definitions no longer
provide. Code below is token-identical; only formatting and comments changed.
"""
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

a__ : str = logging.get_logger(__name__)


class snake_case(__lowerCamelCase):
    """Feature extractor producing ``input_features`` (fbank) and ``attention_mask``."""

    SCREAMING_SNAKE_CASE_ : str = ["input_features", "attention_mask"]

    def __init__(self: Union[str, Any], __A: Optional[int]=8_0, __A: Tuple=1_6_0_0_0, __A: Optional[Any]=8_0, __A: Any=0.0, __A: Any=True, __A: List[str]=True, __A: str=True, **__A: List[Any]):
        # NOTE(review): duplicate ``__A`` parameter names — SyntaxError as written.
        super().__init__(feature_size=__A, sampling_rate=__A, padding_value=__A, **__A)
        __UpperCamelCase = num_mel_bins
        __UpperCamelCase = do_ceptral_normalize
        __UpperCamelCase = normalize_means
        __UpperCamelCase = normalize_vars
        __UpperCamelCase = True

    def _lowerCamelCase(self: Union[str, Any], __A: np.ndarray):
        # Extract Kaldi fbank features from a mono waveform (originally
        # ``_extract_fbank_features``); returns a numpy array.
        __UpperCamelCase = waveform * (2**1_5)  # Kaldi compliance: 16-bit signed integers
        __UpperCamelCase = torch.from_numpy(__A).unsqueeze(0)
        __UpperCamelCase = ta_kaldi.fbank(__A, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def _lowerCamelCase(__A: np.ndarray, __A: int, __A: Optional[bool]=True, __A: Optional[bool]=True, __A: float=0.0):
        # Per-utterance cepstral mean/variance normalization over the unpadded
        # prefix ``x[:input_length]``; padded tail is reset to ``padding_value``.
        # NOTE(review): duplicate ``__A`` parameter names — SyntaxError as written.
        # make sure we normalize float32 arrays
        if normalize_means:
            __UpperCamelCase = x[:input_length].mean(axis=0)
            __UpperCamelCase = np.subtract(__A, __A)
        if normalize_vars:
            __UpperCamelCase = x[:input_length].std(axis=0)
            __UpperCamelCase = np.divide(__A, __A)
        if input_length < x.shape[0]:
            __UpperCamelCase = padding_value
        # make sure array is in float32
        __UpperCamelCase = x.astype(np.floataa)
        return x

    def _lowerCamelCase(self: int, __A: List[np.ndarray], __A: Optional[np.ndarray]=None):
        # Apply CMVN to a batch (originally ``normalize``); real lengths come from
        # the attention mask when provided, else from each feature's own length.
        __UpperCamelCase = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(__A, __A, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(__A, __A)
        ]

    def __call__(self: List[Any], __A: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], __A: Union[bool, str, PaddingStrategy]=False, __A: Optional[int]=None, __A: bool=False, __A: Optional[int]=None, __A: Optional[Union[str, TensorType]]=None, __A: Optional[int]=None, __A: Optional[bool]=None, **__A: Dict):
        # Featurize raw speech: validate sampling rate, batch, extract fbanks,
        # pad, optionally CMVN-normalize, and convert to the requested tensors.
        # NOTE(review): duplicate ``__A`` parameter names — SyntaxError as written.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )
        __UpperCamelCase = isinstance(__A, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        __UpperCamelCase = is_batched_numpy or (
            isinstance(__A, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            __UpperCamelCase = [np.asarray(__A, dtype=np.floataa) for speech in raw_speech]
        elif not is_batched and not isinstance(__A, np.ndarray):
            __UpperCamelCase = np.asarray(__A, dtype=np.floataa)
        elif isinstance(__A, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            __UpperCamelCase = raw_speech.astype(np.floataa)
        # always return batch
        if not is_batched:
            __UpperCamelCase = [raw_speech]
        # extract fbank features
        __UpperCamelCase = [self._extract_fbank_features(__A) for waveform in raw_speech]
        # convert into correct format for padding
        __UpperCamelCase = BatchFeature({'input_features': features})
        __UpperCamelCase = self.pad(
            __A,
            padding=__A,
            max_length=__A,
            truncation=__A,
            pad_to_multiple_of=__A,
            return_attention_mask=__A,
            **__A,
        )
        # make sure list is in array format
        __UpperCamelCase = padded_inputs.get('input_features')
        if isinstance(input_features[0], __A):
            __UpperCamelCase = [np.asarray(__A, dtype=np.floataa) for feature in input_features]
        __UpperCamelCase = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            __UpperCamelCase = [np.asarray(__A, dtype=np.intaa) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            __UpperCamelCase = (
                np.array(__A, dtype=np.intaa)
                if self._get_padding_strategies(__A, max_length=__A) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            __UpperCamelCase = self.normalize(
                padded_inputs['input_features'], attention_mask=__A
            )
        if return_tensors is not None:
            __UpperCamelCase = padded_inputs.convert_to_tensors(__A)
        return padded_inputs
53
1
"""Tests for the RoCBert tokenizer (vocab + word-shape + word-pronunciation files).

NOTE(review): machine-mangled source. Every test method was renamed to the
same identifier ``_lowerCamelCase``, so at class-definition time each later
``def`` shadows the previous one and only the last body survives — the suite
cannot run as written. Bodies also reference the pre-mangle local names
(``vocab_tokens``, ``tokens``, ...). Code below is token-identical to the
source; only formatting and comments changed.
"""
import json
import os
import unittest

from transformers.models.roc_bert.tokenization_roc_bert import (
    VOCAB_FILES_NAMES,
    RoCBertBasicTokenizer,
    RoCBertTokenizer,
    RoCBertWordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class snake_case(__lowerCamelCase, unittest.TestCase):
    """Tokenizer test-mixin configuration plus RoCBert-specific cases."""

    SCREAMING_SNAKE_CASE_ : Dict = RoCBertTokenizer
    SCREAMING_SNAKE_CASE_ : int = None
    SCREAMING_SNAKE_CASE_ : Any = False
    SCREAMING_SNAKE_CASE_ : int = True
    SCREAMING_SNAKE_CASE_ : Optional[Any] = filter_non_english

    def _lowerCamelCase(self: int):
        # setUp: write a tiny vocab plus identity shape/pronunciation maps to tmpdir.
        super().setUp()
        __UpperCamelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        __UpperCamelCase = {}
        __UpperCamelCase = {}
        for i, value in enumerate(__A):
            __UpperCamelCase = i
            __UpperCamelCase = i
        __UpperCamelCase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        __UpperCamelCase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        __UpperCamelCase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(__A, __A, ensure_ascii=__A)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(__A, __A, ensure_ascii=__A)

    def _lowerCamelCase(self: Tuple):
        # Full tokenizer round-trip: tokens and all three id spaces agree.
        __UpperCamelCase = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        __UpperCamelCase = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(__A, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__A), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__A), [5, 6, 2, 5, 7, 8])

    def _lowerCamelCase(self: List[Any]):
        # Basic tokenizer splits CJK characters individually.
        __UpperCamelCase = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def _lowerCamelCase(self: Tuple):
        # Lower-casing behaviour.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def _lowerCamelCase(self: int):
        # Lower-case with strip_accents disabled keeps the accents.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A, strip_accents=__A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def _lowerCamelCase(self: int):
        # Lower-case with strip_accents enabled removes the accents.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A, strip_accents=__A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def _lowerCamelCase(self: Any):
        # Default accent stripping under lower-casing.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?']
        )
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def _lowerCamelCase(self: Optional[int]):
        # No lower-casing: case is preserved.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?']
        )

    def _lowerCamelCase(self: Any):
        # No lower-casing, strip_accents disabled.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A, strip_accents=__A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?']
        )

    def _lowerCamelCase(self: Optional[Any]):
        # No lower-casing, strip_accents enabled.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A, strip_accents=__A)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?']
        )

    def _lowerCamelCase(self: Any):
        # never_split keeps listed tokens intact.
        __UpperCamelCase = RoCBertBasicTokenizer(do_lower_case=__A, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]']
        )

    def _lowerCamelCase(self: Dict):
        # WordPiece tokenizer: greedy longest-match-first with "##" continuation.
        __UpperCamelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        __UpperCamelCase = {}
        for i, token in enumerate(__A):
            __UpperCamelCase = i
        __UpperCamelCase = RoCBertWordpieceTokenizer(vocab=__A, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])

    def _lowerCamelCase(self: Dict):
        # Unicode whitespace classifier.
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))
        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def _lowerCamelCase(self: Dict):
        # Unicode control-character classifier.
        self.assertTrue(_is_control('\u0005'))
        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def _lowerCamelCase(self: int):
        # Punctuation classifier.
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))
        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))

    def _lowerCamelCase(self: Tuple):
        # Soft-hyphen handling (see huggingface/tokenizers#340).
        __UpperCamelCase = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(__A) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        if self.test_rust_tokenizer:
            __UpperCamelCase = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(__A) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']]
            )

    def _lowerCamelCase(self: Dict):
        # Offset mapping stays aligned with special characters / accents.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__A, **__A)
                __UpperCamelCase = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                __UpperCamelCase = tokenizer_r.encode_plus(
                    __A,
                    return_attention_mask=__A,
                    return_token_type_ids=__A,
                    return_offsets_mapping=__A,
                    add_special_tokens=__A,
                )
                __UpperCamelCase = tokenizer_r.do_lower_case if hasattr(__A, 'do_lower_case') else False
                __UpperCamelCase = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), 'Allen'),
                        ((2_1, 2_3), '##NL'),
                        ((2_3, 2_4), '##P'),
                        ((2_5, 3_3), 'sentence'),
                        ((3_3, 3_4), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 1_5), tokenizer_r.mask_token),
                        ((1_6, 2_1), 'allen'),
                        ((2_1, 2_3), '##nl'),
                        ((2_3, 2_4), '##p'),
                        ((2_5, 3_3), 'sentence'),
                        ((3_3, 3_4), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids'])
                )
                self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'])

    def _lowerCamelCase(self: str):
        # Chinese characters get (or skip) "##" prefixes per tokenize_chinese_chars.
        __UpperCamelCase = ['的', '人', '有']
        __UpperCamelCase = ''.join(__A)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                __UpperCamelCase = True
                __UpperCamelCase = self.tokenizer_class.from_pretrained(__A, **__A)
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__A, **__A)
                __UpperCamelCase = tokenizer_p.encode(__A, add_special_tokens=__A)
                __UpperCamelCase = tokenizer_r.encode(__A, add_special_tokens=__A)
                __UpperCamelCase = tokenizer_r.convert_ids_to_tokens(__A)
                __UpperCamelCase = tokenizer_p.convert_ids_to_tokens(__A)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(__A, __A)
                self.assertListEqual(__A, __A)
                __UpperCamelCase = False
                __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__A, **__A)
                __UpperCamelCase = self.tokenizer_class.from_pretrained(__A, **__A)
                __UpperCamelCase = tokenizer_r.encode(__A, add_special_tokens=__A)
                __UpperCamelCase = tokenizer_p.encode(__A, add_special_tokens=__A)
                __UpperCamelCase = tokenizer_r.convert_ids_to_tokens(__A)
                __UpperCamelCase = tokenizer_p.convert_ids_to_tokens(__A)
                # it is expected that only the first Chinese character is not preceded by "##".
                __UpperCamelCase = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(__A)
                ]
                self.assertListEqual(__A, __A)
                self.assertListEqual(__A, __A)

    @slow
    def _lowerCamelCase(self: Optional[int]):
        # Special-token sequence building: [CLS] x [SEP] (+ y [SEP] for pairs).
        __UpperCamelCase = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        __UpperCamelCase = tokenizer.encode('你好', add_special_tokens=__A)
        __UpperCamelCase = tokenizer.encode('你是谁', add_special_tokens=__A)
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__A)
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__A, __A)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def _lowerCamelCase(self: List[str]):
        # prepare_for_model and encode_plus agree on the three id spaces.
        __UpperCamelCase = self.get_tokenizers(do_lower_case=__A)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                __UpperCamelCase = '你好,你是谁'
                __UpperCamelCase = tokenizer.tokenize(__A)
                __UpperCamelCase = tokenizer.convert_tokens_to_ids(__A)
                __UpperCamelCase = tokenizer.convert_tokens_to_shape_ids(__A)
                __UpperCamelCase = tokenizer.convert_tokens_to_pronunciation_ids(__A)
                __UpperCamelCase = tokenizer.prepare_for_model(
                    __A, __A, __A, add_special_tokens=__A
                )
                __UpperCamelCase = tokenizer.encode_plus(__A, add_special_tokens=__A)
                self.assertEqual(__A, __A)
53
"""AltCLIP configuration classes: text encoder, vision encoder, composite model.

NOTE(review): machine-mangled source. All three classes are named ``snake_case``
(each later definition shadows the previous one), ``__init__`` signatures declare
duplicate ``__A`` parameters (SyntaxError as written), and bodies reference the
original class names (``AltCLIPTextConfig``/``AltCLIPVisionConfig``) and local
names that the mangling removed. Code below is token-identical to the source;
only formatting and comments changed.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

a__ : List[Any] = logging.get_logger(__name__)
a__ : List[Any] = {
    '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class snake_case(__lowerCamelCase):
    """Configuration for the AltCLIP text encoder (XLM-R-style transformer)."""

    SCREAMING_SNAKE_CASE_ : Tuple = "altclip_text_model"

    def __init__(self: str, __A: List[Any]=2_5_0_0_0_2, __A: Any=1_0_2_4, __A: int=2_4, __A: Dict=1_6, __A: Optional[Any]=4_0_9_6, __A: Union[str, Any]="gelu", __A: Dict=0.1, __A: Dict=0.1, __A: List[str]=5_1_4, __A: Optional[int]=1, __A: int=0.02, __A: Optional[Any]=0.02, __A: Optional[Any]=1e-05, __A: Dict=1, __A: List[Any]=0, __A: int=2, __A: Tuple="absolute", __A: Optional[Any]=True, __A: Optional[int]=7_6_8, **__A: List[str]):
        # NOTE(review): duplicate ``__A`` parameter names — SyntaxError as written.
        super().__init__(pad_token_id=__A, bos_token_id=__A, eos_token_id=__A, **__A)
        __UpperCamelCase = vocab_size
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = hidden_act
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = hidden_dropout_prob
        __UpperCamelCase = attention_probs_dropout_prob
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = type_vocab_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = position_embedding_type
        __UpperCamelCase = use_cache
        __UpperCamelCase = project_dim


class snake_case(__lowerCamelCase):
    """Configuration for the AltCLIP vision encoder (ViT-style transformer)."""

    SCREAMING_SNAKE_CASE_ : Tuple = "altclip_vision_model"

    def __init__(self: List[Any], __A: Union[str, Any]=7_6_8, __A: Optional[int]=3_0_7_2, __A: Optional[Any]=5_1_2, __A: Tuple=1_2, __A: Union[str, Any]=1_2, __A: Optional[int]=3, __A: Dict=2_2_4, __A: Tuple=3_2, __A: str="quick_gelu", __A: Dict=1e-5, __A: Optional[int]=0.0, __A: List[Any]=0.02, __A: int=1.0, **__A: Optional[int]):
        # NOTE(review): duplicate ``__A`` parameter names — SyntaxError as written.
        super().__init__(**__A)
        __UpperCamelCase = hidden_size
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = projection_dim
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = num_channels
        __UpperCamelCase = patch_size
        __UpperCamelCase = image_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = initializer_factor
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = layer_norm_eps
        __UpperCamelCase = hidden_act

    @classmethod
    def _lowerCamelCase(cls: Optional[Any], __A: Union[str, os.PathLike], **__A: Optional[Any]):
        # from_pretrained: when loading from a composite AltCLIP checkpoint,
        # pull out the nested ``vision_config`` sub-dict before building.
        cls._set_token_in_kwargs(__A)
        __UpperCamelCase, __UpperCamelCase = cls.get_config_dict(__A, **__A)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type') == "altclip":
            __UpperCamelCase = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(__A, **__A)


class snake_case(__lowerCamelCase):
    """Composite AltCLIP configuration: text + vision sub-configs plus projection."""

    SCREAMING_SNAKE_CASE_ : List[str] = "altclip"
    SCREAMING_SNAKE_CASE_ : Optional[int] = True

    def __init__(self: Any, __A: List[str]=None, __A: List[Any]=None, __A: List[str]=7_6_8, __A: List[str]=2.6592, **__A: Dict):
        # NOTE(review): duplicate ``__A`` parameter names — SyntaxError as written.
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        __UpperCamelCase = kwargs.pop('text_config_dict', __A)
        __UpperCamelCase = kwargs.pop('vision_config_dict', __A)
        super().__init__(**__A)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                __UpperCamelCase = {}
            # This is the complete result when using `text_config_dict`.
            __UpperCamelCase = AltCLIPTextConfig(**__A).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(__A)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                __UpperCamelCase = {}
            # This is the complete result when using `vision_config_dict`.
            __UpperCamelCase = AltCLIPVisionConfig(**__A).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                __UpperCamelCase = {
                    str(__A): value for key, value in _vision_config_dict['id2label'].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        __UpperCamelCase = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __UpperCamelCase = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(__A)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            __UpperCamelCase = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.')
        if vision_config is None:
            __UpperCamelCase = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.')
        __UpperCamelCase = AltCLIPTextConfig(**__A)
        __UpperCamelCase = AltCLIPVisionConfig(**__A)
        __UpperCamelCase = projection_dim
        __UpperCamelCase = logit_scale_init_value
        __UpperCamelCase = 1.0

    @classmethod
    def _lowerCamelCase(cls: Union[str, Any], __A: AltCLIPTextConfig, __A: AltCLIPVisionConfig, **__A: Optional[Any]):
        # Alternate constructor from the two sub-configs (originally
        # ``from_text_vision_configs``).
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **__A)

    def _lowerCamelCase(self: List[Any]):
        # Serialize to a plain dict, expanding the nested sub-configs
        # (originally ``to_dict``).
        __UpperCamelCase = copy.deepcopy(self.__dict__)
        __UpperCamelCase = self.text_config.to_dict()
        __UpperCamelCase = self.vision_config.to_dict()
        __UpperCamelCase = self.__class__.model_type
        return output
1
"""Tests for accelerate's SageMaker launcher argument conversion.

NOTE(review): machine-mangled source. Every field of the mock config dataclass
was renamed to the same identifier ``SCREAMING_SNAKE_CASE_`` — at class-creation
time only the last assignment survives — and both classes are named
``snake_case`` (the test class shadows the mock config). The test body also
references ``MockLaunchConfig`` / ``converted_args``, which no longer exist under
the mangled names. Code below is token-identical; only comments changed.
"""
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class snake_case(__lowerCamelCase):
    """Canned SageMaker launch configuration plus two sample script arglists."""

    # Original field names (in order): compute_environment, fp16, ec2_instance_type,
    # iam_role_name, profile, region, num_machines, base_job_name, pytorch_version,
    # transformers_version, training_script, success_training_script_args,
    # fail_training_script_args.
    SCREAMING_SNAKE_CASE_ : List[Any] = ComputeEnvironment.AMAZON_SAGEMAKER
    SCREAMING_SNAKE_CASE_ : str = True
    SCREAMING_SNAKE_CASE_ : Dict = "ml.p3.2xlarge"
    SCREAMING_SNAKE_CASE_ : Dict = "accelerate_sagemaker_execution_role"
    SCREAMING_SNAKE_CASE_ : Any = "hf-sm"
    SCREAMING_SNAKE_CASE_ : List[Any] = "us-east-1"
    SCREAMING_SNAKE_CASE_ : Any = 1
    SCREAMING_SNAKE_CASE_ : List[Any] = "accelerate-sagemaker-1"
    SCREAMING_SNAKE_CASE_ : Optional[Any] = "1.6"
    SCREAMING_SNAKE_CASE_ : Dict = "4.4"
    SCREAMING_SNAKE_CASE_ : Optional[Any] = "train.py"
    # Well-formed script args: every flag is followed by a value (or is boolean).
    SCREAMING_SNAKE_CASE_ : List[Any] = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # Malformed args: mixes bare boolean flags with valued flags, which the
    # converter is expected to reject.
    SCREAMING_SNAKE_CASE_ : Tuple = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class snake_case(unittest.TestCase):
    """Checks `_convert_nargs_to_dict` typing on good args and rejection of bad args."""

    def _lowerCamelCase(self: Tuple):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        __UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args['model_name_or_path'], __A)
        assert isinstance(converted_args['do_train'], __A)
        assert isinstance(converted_args['epochs'], __A)
        assert isinstance(converted_args['learning_rate'], __A)
        assert isinstance(converted_args['max_steps'], __A)
        with pytest.raises(__A):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
53
"""Convert an original LUKE checkpoint into the Hugging Face Transformers format."""
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Load the original weights, remap them onto ``LukeModel`` and verify the outputs.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` describing the model config.
        entity_vocab_path: path to the tab-separated entity vocabulary file.
        pytorch_dump_folder_path: output directory for the converted model/tokenizer.
        model_size: ``"base"`` or ``"large"``; selects the expected verification tensors.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from "@" and "#" word embeddings
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism by
    # copying the plain word-to-word query weights into the three entity-aware variants.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs on a fixed entity-classification example
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor(
            [[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # NOTE: fixed an inverted comparison here — the check must fail when shapes DIFFER,
    # mirroring the word-hidden-state check above.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a tab-separated ``<title>\\t<id>`` file into a ``{title: line_index}`` dict."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
53
1
"""SentencePiece-backed tokenizer for XLM-ProphetNet."""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker; replaced by a space when detokenizing.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered ``token -> index`` dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class snake_case(PreTrainedTokenizer):
    """XLM-ProphetNet tokenizer (SentencePiece + fairseq-style special-token offset).

    The class name is kept as-is for interface compatibility, but the methods and
    class attributes below must use the canonical ``PreTrainedTokenizer`` names so
    the base class can find them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at positions of special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-ProphetNet does not use token type ids; return all zeros."""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join sentence pieces and turn boundary markers back into spaces."""
        return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Append ``[SEP]`` after each sequence: ``X [SEP]`` or ``A [SEP] B [SEP]``."""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
53
"""Tests for ``RagRetriever`` over canonical, custom and legacy FAISS indexes."""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class snake_case(TestCase):
    def setUp(self):
        # All state the tests rely on must live on `self`.
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        # Two documents whose embeddings are all-ones and all-twos, so inner
        # products against the query vectors are easy to reason about.
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
53
1
"""Convert mmsegmentation UperNet+ConvNeXt checkpoints to the HF Transformers format."""
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build the ``UperNetConfig`` (with ConvNeXt backbone) matching *model_name*."""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information (ADE20k: 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping mmseg state-dict keys to HF keys."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download, remap, verify and (optionally) save/push one UperNet-ConvNeXt checkpoint."""
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-convnext-tiny",
        type=str,
        choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
        help="Name of the ConvNext UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
53
'''simple docstring'''
# Lazy-loading package __init__ for the TimeSformer model: the heavy,
# torch-backed modeling module is only imported on first attribute access
# via _LazyModule; the configuration is always importable.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Import map consumed by _LazyModule (module name -> exported symbols).
a__ : List[Any] ={
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch is not installed: the modeling classes are simply not exported.
    pass
else:
    # NOTE(review): the upstream pattern extends the import-structure dict with
    # a 'modeling_timesformer' entry here; this obfuscated copy rebinds an
    # unrelated module-level name instead -- verify against the original
    # transformers __init__ if lazy import of the modeling classes fails.
    a__ : Optional[int] =[
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object in sys.modules with a lazy proxy.
    # NOTE(review): `_import_structure` is never defined in this obfuscated
    # copy (the dict above was renamed to `a__`) -- this line would raise
    # NameError at import time; confirm against the upstream file.
    a__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
'''simple docstring'''
# GPT-Neo model configuration plus ONNX-export helpers.
#
# NOTE(review): this chunk is machine-obfuscated -- every parameter was
# renamed `__A`/`__lowercase` (producing duplicate parameter names, a
# SyntaxError) and every local to `__UpperCamelCase`, so the RHS names below
# (vocab_size, attention_types, ...) are unresolved. Cross-check any repair
# against the upstream transformers configuration_gpt_neo.py.
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging

a__ : Any =logging.get_logger(__name__)

# Pretrained checkpoint name -> hosted config URL.
a__ : Optional[Any] ={
    '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # Model-type tag, keys stripped at inference, and attribute aliases used
    # by the shared PretrainedConfig machinery.
    SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo"
    SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"]
    SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ):
        # Store the raw hyper-parameters on the config instance.
        __UpperCamelCase = vocab_size
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_layers
        __UpperCamelCase = num_heads
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = window_size
        __UpperCamelCase = activation_function
        __UpperCamelCase = resid_dropout
        __UpperCamelCase = embed_dropout
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = classifier_dropout
        __UpperCamelCase = layer_norm_epsilon
        __UpperCamelCase = initializer_range
        __UpperCamelCase = use_cache
        __UpperCamelCase = bos_token_id
        __UpperCamelCase = eos_token_id
        __UpperCamelCase = attention_types
        # Expand the compact [[types, repeat]] spec into one entry per layer.
        __UpperCamelCase = self.expand_attention_types_params(__A )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.'
            )
        super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )

    @staticmethod
    def _lowerCamelCase ( __A : Tuple ):
        # Flatten [[["global", "local"], 12]] -> ["global", "local"] * 12.
        __UpperCamelCase = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions


def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any:
    """simple docstring"""
    # Custom torch.Tensor.unfold replacement used for ONNX export (unfold is
    # not exportable): slices overlapping windows along `dimension` and moves
    # the window axis last via permute.
    import torch

    __UpperCamelCase = input.size()
    __UpperCamelCase = len(__lowercase )
    __UpperCamelCase = shape[dimension]
    __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase )
    __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1
    __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None]
    # NOTE(review): upstream builds `[slice(None)] * rank` here; the
    # obfuscation replaced None with an argument name -- verify before reuse.
    __UpperCamelCase = [slice(__lowercase )] * rank
    __UpperCamelCase = indices
    __UpperCamelCase = input[s]
    __UpperCamelCase = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(__lowercase )


def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]:
    """simple docstring"""
    # ONNX-export helper: find the largest divisor of the sequence length not
    # exceeding the window size, returning (divisor, quotient).
    import torch

    __UpperCamelCase = torch.arange(1 , __lowercase )
    __UpperCamelCase = torch.remainder(__lowercase , __lowercase )
    __UpperCamelCase = remainders == 0
    __UpperCamelCase = candidates[divisor_indices]
    __UpperCamelCase = torch.max(__lowercase )
    return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' )


class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # ONNX export configuration (OnnxConfigWithPast subclass) for GPT-Neo.
    @property
    def _lowerCamelCase ( self : Tuple ):
        # Dynamic-axis spec for the exported graph's inputs; the attention
        # mask covers past + current tokens when a KV cache is used.
        __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(__A , direction='inputs' )
            __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            __UpperCamelCase = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def _lowerCamelCase ( self : int ):
        return self._config.num_heads

    def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
        # Build dummy inputs for ONNX tracing, optionally with zeroed
        # past_key_values tensors when the cached-decoding path is exported.
        __UpperCamelCase = super(__A , self ).generate_dummy_inputs(
            __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )

        # We need to order the input in the way they appears in the forward()
        __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} )

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch

                __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                __UpperCamelCase = seqlen + 2
                __UpperCamelCase = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                __UpperCamelCase = [
                    (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
                ]

        __UpperCamelCase = common_inputs['attention_mask']
        if self.use_past:
            __UpperCamelCase = ordered_inputs['attention_mask'].dtype
            # Extend the mask to cover the (dummy) past positions as well.
            __UpperCamelCase = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 )

        return ordered_inputs

    @property
    def _lowerCamelCase ( self : Dict ):
        # Minimum ONNX opset required by this architecture.
        return 1_3
53
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any]=False ) -> Tuple: """simple docstring""" try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value a__ : str =parse_flag_from_env('''RUN_SLOW''', default=False) a__ : Union[str, Any] =parse_flag_from_env('''RUN_REMOTE''', default=False) a__ : List[str] =parse_flag_from_env('''RUN_LOCAL''', default=True) a__ : Optional[int] =parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression a__ : Any =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') a__ : Optional[int] =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') a__ : List[str] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio a__ : Any =pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam a__ : Tuple =pytest.mark.skipif( not 
config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility a__ : Union[str, Any] =pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows a__ : int =pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase__ ( __lowercase : Optional[Any] ) -> Optional[int]: """simple docstring""" try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires faiss' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Any: """simple docstring""" try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires regex' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Tuple ) -> List[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires elasticsearch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires sqlalchemy' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[str] ) -> List[str]: """simple docstring""" if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip('test requires PyTorch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[Any] ) -> List[str]: """simple docstring""" if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip('test requires TensorFlow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : int ) -> Union[str, Any]: """simple docstring""" if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip('test requires JAX' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> 
Optional[Any]: """simple docstring""" if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip('test requires Pillow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : int ) -> int: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> int: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> Any: """simple docstring""" def _require_spacy_model(__lowercase : Any ): try: import spacy # noqa F401 spacy.load(__lowercase ) except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(__lowercase ) )(__lowercase ) else: return test_case return _require_spacy_model def lowercase__ ( __lowercase : Union[str, Any] ) -> str: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Optional[Any]: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip('test is slow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = 
unittest.skip('test is local' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip('test is packaged' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Any: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip('test requires remote' )(__lowercase ) return test_case def lowercase__ ( *__lowercase : Optional[Any] ) -> Tuple: """simple docstring""" def decorate(cls : int ): for name, fn in cls.__dict__.items(): if callable(__lowercase ) and name.startswith('test' ): for decorator in decorators: __UpperCamelCase = decorator(__lowercase ) setattr(cls , __lowercase , __lowercase ) return cls return decorate class snake_case ( __lowerCamelCase ): """simple docstring""" pass class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =0 SCREAMING_SNAKE_CASE_ : List[Any] =1 SCREAMING_SNAKE_CASE_ : Union[str, Any] =2 @contextmanager def lowercase__ ( __lowercase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , __lowercase : Dict=1e-16 ) -> List[Any]: """simple docstring""" __UpperCamelCase = requests.Session().request def timeout_request(__lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , **__lowercase : List[str] ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. 
Please set a timeout.''' ) __UpperCamelCase = timeout try: return online_request(__lowercase , __lowercase , **__lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__lowercase : int , __lowercase : List[str] , **__lowercase : Union[str, Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=__lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , __lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , __lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , __lowercase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def lowercase__ ( *__lowercase : Any , **__lowercase : Dict ) -> Dict: """simple docstring""" __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__lowercase , **__lowercase ) as tmp_dir: try: os.chdir(__lowercase ) yield finally: os.chdir(__lowercase ) @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase__ ( __lowercase : List[str] , __lowercase : int ) -> Union[str, Any]: """simple docstring""" return deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__lowercase : List[Any] , *__lowercase : Tuple , **__lowercase : Union[str, Any] ): try: return func(*__lowercase , **__lowercase ) except HTTPError as err: if str(__lowercase ).startswith('500' ) or str(__lowercase ).startswith('502' ): pytest.xfail(str(__lowercase ) ) raise err return decorator.decorator(_wrapper , __lowercase ) class snake_case : """simple docstring""" def __init__( self : int , __A : Any , __A : str , __A : List[Any] ): __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def lowercase__ ( __lowercase : Any , __lowercase : Optional[int] ) -> str: """simple docstring""" while True: __UpperCamelCase = await stream.readline() if line: callback(__lowercase ) else: break async def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : Any=None , __lowercase : Optional[Any]=None , __lowercase : int=False , __lowercase : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(__lowercase ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Tuple="" ): __UpperCamelCase = line.decode('utf-8' ).rstrip() sink.append(__lowercase ) if not quiet: print(__lowercase , __lowercase , file=__lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __lowercase : tee(__lowercase , __lowercase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda __lowercase : tee(__lowercase , __lowercase , sys.stderr , label='stderr:' ) ), ] , timeout=__lowercase , ) return _RunOutput(await p.wait() , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : int=180 , __lowercase : int=False , __lowercase : str=True ) -> _RunOutput: """simple docstring""" __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) ) __UpperCamelCase = ' '.join(__lowercase ) if result.returncode > 0: __UpperCamelCase = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCamelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __UpperCamelCase = re.sub(R'^gw' , '' , __lowercase , 0 , re.M ) return int(__lowercase ) def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = 
29500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
53
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Agent tool: answers questions about a document image using a Donut
# (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool

if is_vision_available():
    from PIL import Image


class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # Tool metadata consumed by the agent framework: checkpoint, natural-
    # language description, tool name, processor/model classes, and the
    # declared input/output modalities.
    SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa"
    SCREAMING_SNAKE_CASE_ : Dict =(
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    SCREAMING_SNAKE_CASE_ : List[str] ="document_qa"
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor
    SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel
    SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"]
    SCREAMING_SNAKE_CASE_ : Any =["text"]

    def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ):
        # Fail fast when Pillow is missing -- the tool cannot decode images.
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )

        super().__init__(*__A , **__A )

    def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ):
        # Encode: inject the user question into Donut's DocVQA prompt template
        # and preprocess the image into pixel values.
        __UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        __UpperCamelCase = task_prompt.replace('{user_input}' , __A )
        __UpperCamelCase = self.pre_processor.tokenizer(
            __A , add_special_tokens=__A , return_tensors='pt' ).input_ids
        __UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ):
        # Forward: greedy generation (num_beams=1) with the UNK token banned.
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences

    def _lowerCamelCase ( self : Tuple , __A : List[Any] ):
        # Decode: strip special tokens and the leading task tag, then parse
        # Donut's tagged output into a dict and return the answer field.
        __UpperCamelCase = self.pre_processor.batch_decode(__A )[0]
        __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        __UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip()  # remove first task start token
        # NOTE(review): `tokenajson` is presumably the obfuscated form of the
        # DonutProcessor `token2json` method -- confirm before relying on it.
        __UpperCamelCase = self.pre_processor.tokenajson(__A )

        return sequence["answer"]
53
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a__ : Tuple ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
53
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] ="Salesforce/blip-image-captioning-base" SCREAMING_SNAKE_CASE_ : Union[str, Any] =( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) SCREAMING_SNAKE_CASE_ : Tuple ="image_captioner" SCREAMING_SNAKE_CASE_ : List[str] =AutoModelForVisionaSeq SCREAMING_SNAKE_CASE_ : Union[str, Any] =["image"] SCREAMING_SNAKE_CASE_ : Union[str, Any] =["text"] def __init__( self : Union[str, Any] , *__A : Union[str, Any] , **__A : Union[str, Any] ): requires_backends(self , ['vision'] ) super().__init__(*__A , **__A ) def _lowerCamelCase ( self : List[Any] , __A : "Image" ): return self.pre_processor(images=__A , return_tensors='pt' ) def _lowerCamelCase ( self : Tuple , __A : List[Any] ): return self.model.generate(**__A ) def _lowerCamelCase ( self : Optional[int] , __A : int ): return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0].strip()
53
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]: """simple docstring""" if split_mlp_wi: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __UpperCamelCase = (wi_a, wi_a) else: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __UpperCamelCase = 
# NOTE(review): identifiers in this chunk appear machine-mangled — every local is
# assigned as `__UpperCamelCase` but later read under a different name (`old`,
# `new`, `layer_norm`, ...), so the code as written would raise NameError at
# runtime. Comments below describe the apparent intent (a T5X/UMT5 -> PyTorch
# checkpoint converter); confirm against the upstream transformers script.

# Tail of a T5X lookup helper whose `def` lies before this chunk: reads the MLP
# output projection for block `i` and returns (wi, wo). The assignment target of
# the first line was likely lost at the chunk boundary — TODO confirm upstream.
    params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo


def lowercase__(__lowercase: Union[str, Any], __lowercase: Optional[Any], __lowercase: List[str], __lowercase: Optional[int]) -> str:
    """Return the layer-norm scale vector for block ``i`` of ``{prefix}/{layer_name}``.

    NOTE(review): the body reads ``params``/``prefix``/``layer_name``/``i`` —
    presumably the original parameters were ``(params, i, prefix, layer_name)``.
    """
    return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]


def lowercase__(__lowercase: dict, *, __lowercase: int, __lowercase: bool, __lowercase: bool = False) -> Union[str, Any]:
    """Convert a flattened T5X ``variables`` dict into a PyTorch-style state dict.

    Keyword-only parameters (mangled; apparent originals): ``num_layers``,
    ``is_encoder_only``, ``scalable_attention``.
    """
    __UpperCamelCase = traverse_util.flatten_dict(variables['target'])
    __UpperCamelCase = {'/'.join(__lowercase): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:', __lowercase)
    __UpperCamelCase = collections.OrderedDict()
    # Shared embeddings.
    __UpperCamelCase = old['token_embedder/embedding']
    # Encoder.
    for i in range(__lowercase):
        # Block i, layer 0 (Self Attention).
        __UpperCamelCase = tax_layer_norm_lookup(__lowercase, __lowercase, 'encoder', 'pre_attention_layer_norm')
        __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = tax_attention_lookup(__lowercase, __lowercase, 'encoder', 'attention')
        # .T on each projection: T5X stores (in, out); torch Linear expects (out, in).
        __UpperCamelCase = layer_norm
        __UpperCamelCase = k.T
        __UpperCamelCase = o.T
        __UpperCamelCase = q.T
        __UpperCamelCase = v.T
        # Block i, layer 1 (MLP).
        __UpperCamelCase = tax_layer_norm_lookup(__lowercase, __lowercase, 'encoder', 'pre_mlp_layer_norm')
        __UpperCamelCase, __UpperCamelCase = tax_mlp_lookup(__lowercase, __lowercase, 'encoder', __lowercase)
        __UpperCamelCase = layer_norm
        if split_mlp_wi:
            # Gated-GeLU (v1.1): two input projections wi_0 / wi_1.
            __UpperCamelCase = wi[0].T
            __UpperCamelCase = wi[1].T
        else:
            __UpperCamelCase = wi.T
        __UpperCamelCase = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            __UpperCamelCase = tax_relpos_bias_lookup(__lowercase, __lowercase, 'encoder').T
    __UpperCamelCase = old['encoder/encoder_norm/scale']
    if not scalable_attention:
        # Non-scalable models share a single relative-position bias taken from block 0.
        __UpperCamelCase = tax_relpos_bias_lookup(__lowercase, 0, 'encoder').T
        __UpperCamelCase = tax_relpos_bias_lookup(__lowercase, 0, 'decoder').T
    if not is_encoder_only:
        # Decoder.
        for i in range(__lowercase):
            # Block i, layer 0 (Self Attention).
            __UpperCamelCase = tax_layer_norm_lookup(__lowercase, __lowercase, 'decoder', 'pre_self_attention_layer_norm')
            __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = tax_attention_lookup(__lowercase, __lowercase, 'decoder', 'self_attention')
            __UpperCamelCase = layer_norm
            __UpperCamelCase = k.T
            __UpperCamelCase = o.T
            __UpperCamelCase = q.T
            __UpperCamelCase = v.T
            # Block i, layer 1 (Cross Attention).
            __UpperCamelCase = tax_layer_norm_lookup(__lowercase, __lowercase, 'decoder', 'pre_cross_attention_layer_norm')
            __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = tax_attention_lookup(__lowercase, __lowercase, 'decoder', 'encoder_decoder_attention')
            __UpperCamelCase = layer_norm
            __UpperCamelCase = k.T
            __UpperCamelCase = o.T
            __UpperCamelCase = q.T
            __UpperCamelCase = v.T
            # Block i, layer 2 (MLP).
            __UpperCamelCase = tax_layer_norm_lookup(__lowercase, __lowercase, 'decoder', 'pre_mlp_layer_norm')
            __UpperCamelCase, __UpperCamelCase = tax_mlp_lookup(__lowercase, __lowercase, 'decoder', __lowercase)
            __UpperCamelCase = layer_norm
            if split_mlp_wi:
                __UpperCamelCase = wi[0].T
                __UpperCamelCase = wi[1].T
            else:
                __UpperCamelCase = wi.T
            __UpperCamelCase = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                __UpperCamelCase = tax_relpos_bias_lookup(__lowercase, __lowercase, 'decoder').T
        __UpperCamelCase = old['decoder/decoder_norm/scale']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            __UpperCamelCase = old['decoder/logits_dense/kernel'].T
    return new


def lowercase__(__lowercase: Optional[Any], __lowercase: bool) -> int:
    """Wrap converted numpy params in torch tensors and tie missing embedding weights."""
    __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        __UpperCamelCase = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            __UpperCamelCase = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:
            # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            __UpperCamelCase = state_dict['shared.weight']
    return state_dict


def lowercase__(__lowercase: List[str], __lowercase: Dict, __lowercase: str, __lowercase: int, __lowercase: Optional[Any]) -> Union[str, Any]:
    """Load a T5X checkpoint from disk, convert it, and copy weights into ``model``."""
    __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase)
    __UpperCamelCase = convert_tax_to_pytorch(__lowercase, num_layers=config.num_layers, is_encoder_only=__lowercase, scalable_attention=__lowercase)
    __UpperCamelCase = make_state_dict(__lowercase, __lowercase)
    model.load_state_dict(__lowercase, strict=__lowercase)


def lowercase__(__lowercase: Union[str, Any], __lowercase: Dict, __lowercase: List[str], __lowercase: bool = False, __lowercase: bool = False) -> Optional[int]:
    """End-to-end CLI driver: build config, convert weights, save, and reload as a sanity check."""
    __UpperCamelCase = MTaConfig.from_json_file(__lowercase)
    print(F'''Building PyTorch model from configuration: {config}''')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        __UpperCamelCase = UMTaEncoderModel(__lowercase)
    else:
        __UpperCamelCase = UMTaForConditionalGeneration(__lowercase)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase)
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(__lowercase)
    # Verify that we can load the checkpoint.
    model.from_pretrained(__lowercase)
    print('Done')


if __name__ == "__main__":
    a__ : List[Any] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
    )
    parser.add_argument(
        '''--scalable_attention''',
        action='''store_true''',
        help='''Whether the model uses scaled attention (umt5 model)''',
        default=False,
    )
    a__ : List[str] = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.tax_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
53
1
"""Binary insertion sort: insertion sort that locates each insertion point with binary search."""


def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place (ascending) and return it.

    The insertion position of each element is found by binary search over the
    already-sorted prefix, then the gap is opened by shifting elements right.
    The sort is stable: the search moves right past equal elements.

    :param collection: mutable list of mutually comparable items (may be empty).
    :return: the same list object, sorted ascending.

    NOTE: the def was previously named ``lowercase__`` while the ``__main__``
    block called ``binary_insertion_sort`` (NameError), and all locals were
    collapsed into one mangled name — both fixed here.
    """
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1
        # Binary search for the leftmost position after any equal elements.
        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the block [low, i) one slot right, then drop the value in.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
53
"""Processor tying a Blip image processor to a BERT tokenizer behind one callable.

NOTE(review): identifiers in this file appear machine-mangled (`snake_case`,
`__lowerCamelCase`, `__UpperCamelCase`, `SCREAMING_SNAKE_CASE_`, `__A`); the
bodies read names (`images`, `text`, `text_encoding`, ...) that the mangled
assignments never bind. Comments describe the apparent intent — confirm against
the upstream BlipProcessor.
"""
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case(__lowerCamelCase):
    """Blip processor: prepares model inputs from images and/or text."""

    # Class attributes consumed by the processor mixin (names mangled; apparently
    # `attributes`, `image_processor_class`, `tokenizer_class`).
    SCREAMING_SNAKE_CASE_ : Any = ["image_processor", "tokenizer"]
    SCREAMING_SNAKE_CASE_ : List[Any] = "BlipImageProcessor"
    SCREAMING_SNAKE_CASE_ : Optional[int] = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self: Dict, __A: Optional[int], __A: List[Any]):
        # Presumably disables `tokenizer.return_token_type_ids` before delegating
        # to the mixin __init__, then caches the image processor — TODO confirm;
        # the assignment targets are mangled.
        __UpperCamelCase = False
        super().__init__(__A, __A)
        __UpperCamelCase = self.image_processor

    def __call__(
        self: List[Any],
        __A: ImageInput = None,
        __A: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        __A: bool = True,
        __A: Union[bool, str, PaddingStrategy] = False,
        __A: Union[bool, str, TruncationStrategy] = None,
        __A: Optional[int] = None,
        __A: int = 0,
        __A: Optional[int] = None,
        __A: Optional[bool] = None,
        __A: bool = False,
        __A: bool = False,
        __A: bool = False,
        __A: bool = False,
        __A: bool = False,
        __A: bool = True,
        __A: Optional[Union[str, TensorType]] = None,
        **__A: List[Any],
    ):
        """Tokenize ``text`` and/or preprocess ``images``; at least one is required."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')
        # Get only text
        if images is None:
            __UpperCamelCase = self.tokenizer
            __UpperCamelCase = self.tokenizer(
                text=__A, add_special_tokens=__A, padding=__A, truncation=__A, max_length=__A, stride=__A, pad_to_multiple_of=__A, return_attention_mask=__A, return_overflowing_tokens=__A, return_special_tokens_mask=__A, return_offsets_mapping=__A, return_token_type_ids=__A, return_length=__A, verbose=__A, return_tensors=__A, **__A,
            )
            return text_encoding
        # add pixel_values
        __UpperCamelCase = self.image_processor(__A, return_tensors=__A)
        if text is not None:
            __UpperCamelCase = self.tokenizer(
                text=__A, add_special_tokens=__A, padding=__A, truncation=__A, max_length=__A, stride=__A, pad_to_multiple_of=__A, return_attention_mask=__A, return_overflowing_tokens=__A, return_special_tokens_mask=__A, return_offsets_mapping=__A, return_token_type_ids=__A, return_length=__A, verbose=__A, return_tensors=__A, **__A,
            )
        else:
            __UpperCamelCase = None
        if text_encoding is not None:
            # Merge token fields into the image-processor output.
            encoding_image_processor.update(__A)
        return encoding_image_processor

    def _lowerCamelCase(self: List[Any], *__A: Dict, **__A: Optional[int]):
        # Delegates to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*__A, **__A)

    def _lowerCamelCase(self: List[Any], *__A: List[str], **__A: Dict):
        # Delegates to the tokenizer's decode.
        return self.tokenizer.decode(*__A, **__A)

    @property
    def _lowerCamelCase(self: Tuple):
        # Union of tokenizer + image-processor input names, order-preserving
        # de-duplication via dict.fromkeys.
        __UpperCamelCase = self.tokenizer.model_input_names
        __UpperCamelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
53
1
"""ImageGPT-style image processor: resize, normalize to [-1, 1], and optionally
color-quantize pixels to cluster indices ("input_ids").

NOTE(review): identifiers appear machine-mangled (`__UpperCamelCase`, `__A`,
`snake_case`); bodies read names the mangled assignments never bind. Comments
describe apparent intent — confirm against the upstream ImageGPT processor.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

a__ : List[str] = logging.get_logger(__name__)


def lowercase__(__lowercase: str, __lowercase: List[Any]) -> Dict:
    """Pairwise squared Euclidean distance via ||a||^2 - 2ab + ||b||^2.

    NOTE(review): body reads ``b``/``a``/``aa``/``ab``/``ba`` — apparently the
    original signature was ``(a, b)``.
    """
    __UpperCamelCase = b.T
    __UpperCamelCase = np.sum(np.square(__lowercase), axis=1)
    __UpperCamelCase = np.sum(np.square(__lowercase), axis=0)
    __UpperCamelCase = np.matmul(__lowercase, __lowercase)
    __UpperCamelCase = aa[:, None] - 2 * ab + ba[None, :]
    return d


def lowercase__(__lowercase: Tuple, __lowercase: Dict) -> List[Any]:
    """Map each RGB pixel of ``x`` to the index of its nearest color cluster."""
    __UpperCamelCase = x.reshape(-1, 3)
    __UpperCamelCase = squared_euclidean_distance(__lowercase, __lowercase)
    return np.argmin(__lowercase, axis=1)


class snake_case(__lowerCamelCase):
    """ImageGPT image processor (apparent original: ``ImageGPTImageProcessor``)."""

    SCREAMING_SNAKE_CASE_ : List[Any] = ["pixel_values"]

    def __init__(self: Union[str, Any], __A: Optional[Union[List[List[int]], np.ndarray]] = None, __A: bool = True, __A: Dict[str, int] = None, __A: PILImageResampling = PILImageResampling.BILINEAR, __A: bool = True, __A: bool = True, **__A: List[str]):
        # Defaults: 256x256 resize, BILINEAR resample, normalize + color-quantize on.
        super().__init__(**__A)
        __UpperCamelCase = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
        __UpperCamelCase = get_size_dict(__A)
        __UpperCamelCase = np.array(__A) if clusters is not None else None
        __UpperCamelCase = do_resize
        __UpperCamelCase = size
        __UpperCamelCase = resample
        __UpperCamelCase = do_normalize
        __UpperCamelCase = do_color_quantize

    def _lowerCamelCase(self: Optional[int], __A: np.ndarray, __A: Dict[str, int], __A: PILImageResampling = PILImageResampling.BILINEAR, __A: Optional[Union[str, ChannelDimension]] = None, **__A: Union[str, Any]):
        """Resize a single image to ``size`` (requires both height and width keys)."""
        __UpperCamelCase = get_size_dict(__A)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
        return resize(
            __A, size=(size['height'], size['width']), resample=__A, data_format=__A, **__A
        )

    def _lowerCamelCase(self: Dict, __A: np.ndarray, __A: Optional[Union[str, ChannelDimension]] = None):
        # Normalize to [-1, 1]: x / 127.5 - 1.
        __UpperCamelCase = rescale(image=__A, scale=1 / 127.5, data_format=__A)
        __UpperCamelCase = image - 1
        return image

    def _lowerCamelCase(self: Optional[Any], __A: ImageInput, __A: bool = None, __A: Dict[str, int] = None, __A: PILImageResampling = None, __A: bool = None, __A: Optional[bool] = None, __A: Optional[Union[List[List[int]], np.ndarray]] = None, __A: Optional[Union[str, TensorType]] = None, __A: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **__A: Optional[int]):
        """Full preprocessing pipeline; per-call args fall back to the instance defaults."""
        __UpperCamelCase = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase = size if size is not None else self.size
        __UpperCamelCase = get_size_dict(__A)
        __UpperCamelCase = resample if resample is not None else self.resample
        __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCamelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        __UpperCamelCase = clusters if clusters is not None else self.clusters
        __UpperCamelCase = np.array(__A)
        __UpperCamelCase = make_list_of_images(__A)
        if not valid_images(__A):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        # NOTE(review): `do_resize and size is None or resample is None` binds as
        # `(do_resize and size is None) or (resample is None)` — possibly not the
        # intended grouping; kept as-is.
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.')
        # All transformations expect numpy arrays.
        __UpperCamelCase = [to_numpy_array(__A) for image in images]
        if do_resize:
            __UpperCamelCase = [self.resize(image=__A, size=__A, resample=__A) for image in images]
        if do_normalize:
            __UpperCamelCase = [self.normalize(image=__A) for image in images]
        if do_color_quantize:
            __UpperCamelCase = [to_channel_dimension_format(__A, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            __UpperCamelCase = np.array(__A)
            __UpperCamelCase = color_quantize(__A, __A).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            __UpperCamelCase = images.shape[0]
            __UpperCamelCase = images.reshape(__A, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            __UpperCamelCase = list(__A)
        else:
            __UpperCamelCase = [to_channel_dimension_format(__A, __A) for image in images]
        __UpperCamelCase = {'input_ids': images}
        return BatchFeature(data=__A, tensor_type=__A)
53
"""Singly linked list node with cycle detection via visited-node tracking."""
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    """Raised while iterating a list that links back to an already-visited node."""


class Node:
    """One node of a singly linked list.

    NOTE: the mangled source named both classes ``snake_case`` (the second
    shadowed the first) and never actually linked nodes in the demo; restored
    to the conventional ``ContainsLoopError``/``Node`` names the demo code uses.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        # Next node in the chain; None terminates the list.
        self.next_node: Node | None = None

    def __iter__(self):
        """Yield each node's data; raise ContainsLoopError on revisiting a node."""
        node: Node | None = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True iff following next_node links from here ever revisits a node."""
        try:
            list(self)  # exhaust the iterator; loops raise ContainsLoopError
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False (equal data, distinct nodes)

    root_node = Node(1)
    print(root_node.has_loop)  # False
53
1
"""Test utilities for the `datasets` library: env-driven skip decorators,
dependency-gated skips, offline-mode simulation, Arrow memory assertions, and
async subprocess helpers.

NOTE(review): identifiers appear machine-mangled (`lowercase__`,
`__UpperCamelCase`, `snake_case`); bodies read names the mangled assignments
never bind, so the code as written would raise NameError. Comments describe
apparent intent — confirm against `datasets.utils` / `testing_utils` upstream.
"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def lowercase__(__lowercase: List[str], __lowercase: Union[str, Any]=False) -> Tuple:
    """Read a yes/no environment flag ``key``; fall back to ``default`` when unset."""
    try:
        __UpperCamelCase = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        __UpperCamelCase = default
    else:
        # KEY is set, convert it to True or False.
        try:
            __UpperCamelCase = strtobool(__lowercase)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''')
    return _value


# Global flags (mangled names; apparently _run_slow_tests / _run_remote_tests /
# _run_local_tests / _run_packaged_tests).
a__ : str = parse_flag_from_env('''RUN_SLOW''', default=False)
a__ : Union[str, Any] = parse_flag_from_env('''RUN_REMOTE''', default=False)
a__ : List[str] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a__ : Optional[int] = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a__ : Any = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a__ : List[str] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a__ : Any = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a__ : Tuple = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a__ : Union[str, Any] = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a__ : int = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)


def lowercase__(__lowercase: Optional[Any]) -> Optional[int]:
    """Skip the test unless `faiss` is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        __UpperCamelCase = unittest.skip('test requires faiss')(__lowercase)
    return test_case


def lowercase__(__lowercase: Union[str, Any]) -> Any:
    """Skip the test unless `regex` is importable."""
    try:
        import regex  # noqa
    except ImportError:
        __UpperCamelCase = unittest.skip('test requires regex')(__lowercase)
    return test_case


def lowercase__(__lowercase: Tuple) -> List[Any]:
    """Skip the test unless `elasticsearch` is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        __UpperCamelCase = unittest.skip('test requires elasticsearch')(__lowercase)
    return test_case


def lowercase__(__lowercase: Union[str, Any]) -> Tuple:
    """Skip the test unless `sqlalchemy` is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        __UpperCamelCase = unittest.skip('test requires sqlalchemy')(__lowercase)
    return test_case


def lowercase__(__lowercase: List[str]) -> List[str]:
    """Skip the test unless PyTorch is available (per datasets.config)."""
    if not config.TORCH_AVAILABLE:
        __UpperCamelCase = unittest.skip('test requires PyTorch')(__lowercase)
    return test_case


def lowercase__(__lowercase: Optional[Any]) -> List[str]:
    """Skip the test unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        __UpperCamelCase = unittest.skip('test requires TensorFlow')(__lowercase)
    return test_case


def lowercase__(__lowercase: int) -> Union[str, Any]:
    """Skip the test unless JAX is available."""
    if not config.JAX_AVAILABLE:
        __UpperCamelCase = unittest.skip('test requires JAX')(__lowercase)
    return test_case


def lowercase__(__lowercase: str) -> Optional[Any]:
    """Skip the test unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        __UpperCamelCase = unittest.skip('test requires Pillow')(__lowercase)
    return test_case


def lowercase__(__lowercase: Dict) -> Any:
    """Skip the test unless `transformers` is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(__lowercase)
    else:
        return test_case


def lowercase__(__lowercase: int) -> int:
    """Skip the test unless `tiktoken` is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(__lowercase)
    else:
        return test_case


def lowercase__(__lowercase: str) -> int:
    """Skip the test unless `spacy` is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(__lowercase)
    else:
        return test_case


def lowercase__(__lowercase: str) -> Any:
    """Parametrized decorator: skip unless the given spaCy model can be loaded."""
    def _require_spacy_model(__lowercase: Any):
        try:
            import spacy  # noqa F401

            spacy.load(__lowercase)
        except ImportError:
            return unittest.skip('test requires spacy')(__lowercase)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(__lowercase))(__lowercase)
        else:
            return test_case

    return _require_spacy_model


def lowercase__(__lowercase: Union[str, Any]) -> str:
    """Skip the test unless `pyspark` is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(__lowercase)
    else:
        return test_case


def lowercase__(__lowercase: Optional[int]) -> Optional[Any]:
    """Skip the test unless `joblibspark` is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(__lowercase)
    else:
        return test_case


def lowercase__(__lowercase: List[Any]) -> List[str]:
    """Mark a test slow: skipped unless RUN_SLOW was enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        __UpperCamelCase = unittest.skip('test is slow')(__lowercase)
    return test_case


def lowercase__(__lowercase: List[Any]) -> List[str]:
    """Skipped unless RUN_LOCAL was enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        __UpperCamelCase = unittest.skip('test is local')(__lowercase)
    return test_case


def lowercase__(__lowercase: str) -> List[str]:
    """Skipped unless RUN_PACKAGED was enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        __UpperCamelCase = unittest.skip('test is packaged')(__lowercase)
    return test_case


def lowercase__(__lowercase: Optional[int]) -> Any:
    """Skipped unless RUN_REMOTE was enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        __UpperCamelCase = unittest.skip('test requires remote')(__lowercase)
    return test_case


def lowercase__(*__lowercase: Optional[Any]) -> Tuple:
    """Class decorator: apply every given decorator to each `test*` method."""
    def decorate(cls: int):
        for name, fn in cls.__dict__.items():
            if callable(__lowercase) and name.startswith('test'):
                for decorator in decorators:
                    __UpperCamelCase = decorator(__lowercase)
                # NOTE(review): flattened source is ambiguous here; setattr placed
                # after the decorator loop to match the upstream implementation.
                setattr(cls, __lowercase, __lowercase)
        return cls

    return decorate


class snake_case(__lowerCamelCase):
    """Marker exception (apparent original: RequestWouldHangIndefinitelyError)."""
    pass


class snake_case(__lowerCamelCase):
    """Offline-simulation modes (apparent original: OfflineSimulationMode enum)."""
    SCREAMING_SNAKE_CASE_ : Any = 0
    SCREAMING_SNAKE_CASE_ : List[Any] = 1
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2


@contextmanager
def lowercase__(__lowercase: List[str]=OfflineSimulationMode.CONNECTION_FAILS, __lowercase: Dict=1e-16) -> List[Any]:
    """Simulate being offline by patching `requests` (or the HF offline flag)."""
    __UpperCamelCase = requests.Session().request

    def timeout_request(__lowercase: List[Any], __lowercase: Tuple, __lowercase: List[Any], **__lowercase: List[str]):
        # Change the url to an invalid url so that the connection hangs
        __UpperCamelCase = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.'''
            )
        __UpperCamelCase = timeout
        try:
            return online_request(__lowercase, __lowercase, **__lowercase)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            __UpperCamelCase = url
            __UpperCamelCase = e.args[0]
            __UpperCamelCase = (max_retry_error.args[0].replace('10.255.255.1', F'''OfflineMock[{url}]'''),)
            __UpperCamelCase = (max_retry_error,)
            raise

    def raise_connection_error(__lowercase: int, __lowercase: List[str], **__lowercase: Union[str, Any]):
        raise requests.ConnectionError('Offline mode is enabled.', request=__lowercase)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', __lowercase):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', __lowercase):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', __lowercase):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')


@contextmanager
def lowercase__(*__lowercase: Any, **__lowercase: Dict) -> Dict:
    """Run the `with` body inside a temporary working directory; restore cwd after."""
    __UpperCamelCase = str(Path().resolve())
    with tempfile.TemporaryDirectory(*__lowercase, **__lowercase) as tmp_dir:
        try:
            os.chdir(__lowercase)
            yield
        finally:
            os.chdir(__lowercase)


@contextmanager
def lowercase__() -> Optional[Any]:
    """Assert that Arrow allocated memory strictly increases inside the block."""
    import gc

    gc.collect()
    __UpperCamelCase = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def lowercase__() -> Optional[Any]:
    """Assert that Arrow allocated memory does not increase inside the block."""
    import gc

    gc.collect()
    __UpperCamelCase = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def lowercase__(__lowercase: List[str], __lowercase: int) -> Union[str, Any]:
    """True iff two RNGs produce the same first 10 draws (deepcopied so state is untouched)."""
    return deepcopy(__lowercase).integers(0, 100, 10).tolist() == deepcopy(__lowercase).integers(0, 100, 10).tolist()


def lowercase__(__lowercase: str) -> List[str]:
    """Decorator: xfail the test when the server answers HTTP 500/502."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(__lowercase: List[Any], *__lowercase: Tuple, **__lowercase: Union[str, Any]):
        try:
            return func(*__lowercase, **__lowercase)
        except HTTPError as err:
            if str(__lowercase).startswith('500') or str(__lowercase).startswith('502'):
                pytest.xfail(str(__lowercase))
            raise err

    return decorator.decorator(_wrapper, __lowercase)


class snake_case:
    """Result of a subprocess run (apparent original: _RunOutput)."""
    def __init__(self: int, __A: Any, __A: str, __A: List[Any]):
        # returncode / stdout lines / stderr lines (targets mangled).
        __UpperCamelCase = returncode
        __UpperCamelCase = stdout
        __UpperCamelCase = stderr


async def lowercase__(__lowercase: Any, __lowercase: Optional[int]) -> str:
    """Forward each line of an async stream to ``callback`` until EOF."""
    while True:
        __UpperCamelCase = await stream.readline()
        if line:
            callback(__lowercase)
        else:
            break


async def lowercase__(__lowercase: Optional[int], __lowercase: Union[str, Any]=None, __lowercase: Any=None, __lowercase: Optional[Any]=None, __lowercase: int=False, __lowercase: List[Any]=False) -> _RunOutput:
    """Run ``cmd`` asynchronously, teeing stdout/stderr line by line."""
    if echo:
        print('\nRunning: ', ' '.join(__lowercase))
    __UpperCamelCase = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=__lowercase,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=__lowercase,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    __UpperCamelCase = []
    __UpperCamelCase = []

    def tee(__lowercase: Optional[Any], __lowercase: Dict, __lowercase: List[str], __lowercase: Tuple=""):
        __UpperCamelCase = line.decode('utf-8').rstrip()
        sink.append(__lowercase)
        if not quiet:
            print(__lowercase, __lowercase, file=__lowercase)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda __lowercase: tee(__lowercase, __lowercase, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda __lowercase: tee(__lowercase, __lowercase, sys.stderr, label='stderr:')),
        ],
        timeout=__lowercase,
    )
    return _RunOutput(await p.wait(), __lowercase, __lowercase)


def lowercase__(__lowercase: Dict, __lowercase: Any=None, __lowercase: int=None, __lowercase: int=180, __lowercase: int=False, __lowercase: str=True) -> _RunOutput:
    """Synchronous wrapper: run the async subprocess, raise on failure or empty output."""
    __UpperCamelCase = asyncio.get_event_loop()
    __UpperCamelCase = loop.run_until_complete(
        _stream_subprocess(__lowercase, env=__lowercase, stdin=__lowercase, timeout=__lowercase, quiet=__lowercase, echo=__lowercase)
    )
    __UpperCamelCase = ' '.join(__lowercase)
    if result.returncode > 0:
        __UpperCamelCase = '\n'.join(result.stderr)
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}'''
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'''\'{cmd_str}\' produced no output.''')
    return result


def lowercase__() -> List[str]:
    """Numeric id of the current pytest-xdist worker (0 when not under xdist)."""
    __UpperCamelCase = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    __UpperCamelCase = re.sub(R'^gw', '', __lowercase, 0, re.M)
    return int(__lowercase)


def lowercase__() -> List[Any]:
    """Unique torch.distributed port per xdist worker: 29500 + worker id."""
    __UpperCamelCase = 29500
    __UpperCamelCase = pytest_xdist_worker_id()
    return port + uniq_delta
53
"""Rabin–Karp substring search using a rolling polynomial hash."""

# Base of the polynomial hash: one "digit" per possible byte/char code.
alphabet_size = 256
# Modulus to hash a string (large prime keeps collisions rare).
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True iff ``pattern`` occurs as a contiguous substring of ``text``.

    Hashes every length-``len(pattern)`` window of ``text`` with a rolling hash
    and compares characters only on hash matches, so the expected running time
    is O(len(text) + len(pattern)).

    NOTE: the mangled source gave both parameters the same name (a SyntaxError)
    and lost the module constants; restored to the names the in-file test
    (`test_rabin_karp`) already calls.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and first substring of text.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # alphabet_size ** (p_len - 1) mod modulus, used to roll the leading char out.
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        # Verify characters on hash hit to rule out collisions.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Rolling hash: drop text[i], append text[i + p_len].
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus

    return False


def test_rabin_karp() -> None:
    """Self-test with matching and non-matching pattern/text pairs."""
    # Test 1)
    pattern = 'abc1abc12'
    text_a = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text_b = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)

    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)

    # Test 5) non-ASCII characters
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')


if __name__ == "__main__":
    test_rabin_karp()
53
1
"""Project Euler Problem 8: largest product of thirteen adjacent digits in a
1000-digit number."""
from functools import reduce

# The 1000-digit number from the problem statement, split across lines for readability.
N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def solution(n: str = N) -> int:
    """Return the greatest product of 13 adjacent digits in the digit string ``n``.

    Each 13-character window is folded digit-by-digit with ``reduce``; the fold
    keeps the running product as a string so a single lambda handles both the
    char->int conversion and the multiplication.

    NOTE: restores the original, working form — the mangled source gave the
    reduce lambda two identical parameter names (a SyntaxError) and the
    ``__main__`` block calls ``solution``, not the mangled def name.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f'{solution() = }')
53
'''simple docstring'''
from __future__ import annotations


class Matrix:
    """A small int/float matrix with determinants, inverses and arithmetic.

    NOTE(review): the obfuscated source named this class ``snake_case`` and
    every method ``_lowerCamelCase`` while the bodies referred to ``Matrix``
    and the real member names (``num_rows``, ``get_minor``, ``columns``, ...),
    so nothing was callable.  The intended names are restored here; the
    numeric behaviour (including the ``int(...)`` truncation in scalar
    multiplication) is kept as-is.
    """

    def __init__(self, rows: list[list[int]]):
        # Validate that rows form a rectangular matrix of numbers; an empty
        # list of rows is accepted as the empty matrix.
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.'
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the matrix transposed as a list of columns."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) shape tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Return the identity matrix of the same row count."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant via Laplace expansion; 0 for non-square matrices."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        # Expand along the first row for larger matrices.
        return sum(
            self.rows[0][column] * self.cofactors().rows[0][column]
            for column in range(self.num_columns)
        )

    def is_invertable(self) -> bool:
        # (sic) spelling kept from the original public API.
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with ``row``/``column`` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        # NOTE: recomputes self.minors() per element as the original did;
        # behaviour-identical, just not the fastest.
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        # The obfuscated source referenced the undefined name ``__A`` here;
        # restored to join the row values.
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append ``row`` (or insert it at ``position``) after validation."""
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError('Row must be equal in length to the other rows in the matrix')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append ``column`` (or insert it at ``position``) after validation."""
        type_error = TypeError('Column must be a list containing all ints and/or floats')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError('Column must be equal in length to the other columns in the matrix')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # Scalar product; int() truncation preserved from the original.
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second'
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError('A Matrix can only be multiplied by an int, float, or another matrix')

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError('Only invertable matrices can be raised to a negative power')
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
1
'''simple docstring'''
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of ``sequence`` via backtracking.

    (The obfuscated module defined both helpers under the single name
    ``lowercase__`` and called the undefined names below; the intended
    names are restored.)
    """
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursively extend ``current_sequence``; print it once complete.

    ``index_used`` marks which source positions are already consumed so
    each element appears exactly once per permutation.
    """
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # Undo the choice before trying the next candidate (backtrack).
            current_sequence.pop()
            index_used[i] = False


a__ : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(a__)

a__ : list[int | str] = ["A", "B", "C"]
generate_all_permutations(a__)
53
'''simple docstring'''
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two initializer protos for equality, ignoring their names.

    Names are blanked, the protos compared, then the names restored.
    """
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    """Rewire every input of ``node_proto`` called ``name`` to ``new_name``.

    Recurses into If/Loop subgraph attributes so nested references are
    updated as well.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # protobuf repeated fields have no item assignment; emulate it
            # with insert + pop as the original did.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply :func:`_node_replace_input_with` to every node of a graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers and rewire references to the kept copy.

    ``ind_to_replace`` holds ``(i, ref_i)`` pairs: initializer ``i`` is a
    duplicate of the earlier ``ref_i``.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializer tensors in an ONNX model file.

    NOTE(review): the obfuscated source defined all helpers above under the
    single name ``lowercase__`` while calling them by these names; restored.

    Saves the optimized model next to the input as ``optimized_<name>`` and
    returns the new path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # Estimate the bytes saved from the proto's dims and dtype
                # code (1=float32, 6=int32, 7=int64, 11=double).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
53
1
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available

logging.basicConfig(level=logging.DEBUG)

# The obfuscated source bound this to ``a__`` while the rest of the module
# used ``logger``; restored.
logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` command-line flag."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    """Load the ``all_results.json`` written by a finished no_trainer run."""
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f'''can\'t find {path}''')
    return results


def is_cuda_and_apex_available():
    """True when running on CUDA with NVIDIA apex installed (fp16 tests)."""
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    """End-to-end smoke tests for the pytorch ``*_no_trainer`` example
    scripts, launched through ``accelerate launch``.

    NOTE(review): the obfuscated source used an undefined base class
    (``__lowerCamelCase``) and named every method ``_lowerCamelCase`` so the
    definitions collided and unittest could not discover them; the intended
    ``TestCasePlus`` base and ``test_*``/``setUpClass`` names are restored.
    """

    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'glue_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        '''.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'clm_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'mlm_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertLess(result['train_loss'], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'ner_no_trainer')))

    @unittest.skip(reason='Fix me @muellerzr')
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['eval_f1'], 28)
        self.assertGreaterEqual(result['eval_exact'], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'qa_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'swag_no_trainer')))

    @slow
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_rouge1'], 10)
        self.assertGreaterEqual(result['eval_rouge2'], 2)
        self.assertGreaterEqual(result['eval_rougeL'], 7)
        self.assertGreaterEqual(result['eval_rougeLsum'], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'summarization_no_trainer')))

    @slow
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_bleu'], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'translation_no_trainer')))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        '''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_overall_accuracy'], 0.10)

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result['eval_accuracy'], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'step_1')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'image_classification_no_trainer')))
53
'''simple docstring'''
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``.

    Returns the tuple ``(less, equal, greater)``.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (0-based).

    Average-case O(n) selection with a random pivot.  Returns ``None`` when
    ``index`` is out of range.  (The obfuscated source defined both
    functions under the single name ``lowercase__`` while calling
    ``_partition`` / ``quick_select``; the intended names are restored.)
    """
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
53
1
'''simple docstring'''

# Conversion factor from each supported energy unit to joules.  The
# obfuscated source named this ``a__`` while the function below read the
# undefined name ``ENERGY_CONVERSION``; restored.
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.355_818,
}


def lowercase__(from_type: str, to_type: str, value: float) -> float:
    """Convert ``value`` from ``from_type`` to ``to_type`` energy units.

    The conversion goes through joules: multiply by the source factor,
    divide by the target factor.

    Raises:
        ValueError: if either unit name is not in ``ENERGY_CONVERSION``.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            f'''Valid values are: {', '.join(ENERGY_CONVERSION)}'''
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
53
'''simple docstring'''
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys with no HF equivalent (in place)."""
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares an embedding's weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    """Convert a fairseq mBART checkpoint into a HF
    ``MBartForConditionalGeneration``.

    NOTE(review): the obfuscated source dropped the assignment targets
    (config attribute, ``shared.weight`` key, ``lm_head``); restored per the
    upstream conversion script.  ``mbart_aa`` flags an mBART-50 checkpoint.
    """
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    # argparse stores ``--mbart_50`` as ``args.mbart_50``; the original read
    # the nonexistent ``args.mbart_aa``.
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
53
1
'''simple docstring'''
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for :class:`PhobertTokenizer`.

    NOTE(review): the obfuscated source used an undefined base class
    (``__lowerCamelCase``), colliding method names (``_lowerCamelCase``) and
    colliding attribute names; the ``TokenizerTesterMixin`` hook names are
    restored so the common tokenizer test-suite can run.
    """

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l à</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f'''{token} {vocab_tokens[token]}\n''')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'Tôi là VinAI Research'
        output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'Tôi là VinAI Research'
        bpe_tokens = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
53
'''simple docstring'''
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader

from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that packs tokenized text into fixed-length blocks.

    Examples are buffered until roughly ``seq_length * chars_per_token *
    num_of_sequences`` characters are collected, tokenized in one batch,
    joined with the BOS token as separator, and yielded as tensors of
    exactly ``seq_length`` token ids (trailing remainders are dropped).
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Character budget per tokenization batch.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            # Fix: the obfuscated source called the module-global
            # ``tokenizer`` with garbled arguments; use the instance's
            # tokenizer on the buffered texts without truncation.
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    """Stream the evaluation split and wrap it in a batched DataLoader."""
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """Return (mean loss, perplexity) of the global model on the eval data."""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
53
1
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


# FIX: the mangled original bound the logger only to `a__` but called
# `logger.warning(...)` (NameError). Bind both so existing references work.
logger = logging.get_logger(__name__)
a__ : int = logger


class snake_case:
    """A single conversation: the processed user inputs, the generated
    responses, and the not-yet-processed user input (``new_user_input``)."""

    def __init__(
        self,
        text: str = None,
        conversation_id: uuid.UUID = None,
        past_user_inputs: List[str] = None,
        generated_responses: List[str] = None,
    ):
        # FIX: duplicate `__A` parameter names were a SyntaxError; names restored.
        if not conversation_id:
            # FIX: was `uuid.uuida()` — no such attribute; the stdlib factory is uuid.uuid4().
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        # FIX: was `isinstance(other, other)` (TypeError at runtime).
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue `text` as the next user input; `overwrite` replaces a pending one."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    f'''with: "{text}".'''
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'''
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move any pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record a model reply."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs in conversational order."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += f'''{name} >> {text} \n'''
        return output


# Backward-compatible alias: the annotations and isinstance checks below refer
# to `Conversation`, which the mangled original never defined.
Conversation = snake_case


@add_end_docstrings(
    # FIX: was the undefined name `__lowerCamelCase`; PIPELINE_INIT_ARGS is imported above.
    PIPELINE_INIT_ARGS,
    r"\n        min_length_for_response (`int`, *optional*, defaults to 32):\n            The minimum length (in number of tokens) for a response.\n        minimum_tokens (`int`, *optional*, defaults to 10):\n            The minimum length of tokens to leave for a response.\n    ",
)
class snake_case(Pipeline):
    """Multi-turn conversational pipeline built on a generate-capable model."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # Generation needs padding; fall back to the EOS token.
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split call-time kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        """Run the pipeline; a single Conversation in yields a single one out."""
        # FIX: duplicate `__A` parameter names were a SyntaxError, and
        # `num_workers` was not forwarded as a keyword.
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        # FIX: was `isinstance(conversation, conversation)`.
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                'Add user inputs with the conversation\'s `add_user_input` method'
            )
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length', self.model.config.max_length)
        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            # FIX: message said "is to long".
            logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''')
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation')
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start with one decoder start token.
            start_position = 1
        else:
            # Decoder-only models echo the prompt; skip the first n tokens.
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        """Concatenate all turns, EOS-separated, truncated to model_max_length."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
53
'''simple docstring'''
# GPT-Neo model configuration plus its ONNX export config.
#
# NOTE(review): this file has been machine-mangled and is NOT runnable as-is:
#   * both classes inherit from `__lowerCamelCase`, which is never defined
#     (presumably PretrainedConfig / OnnxConfigWithPast — verify upstream);
#   * `__init__` declares many parameters all named `__A` (a SyntaxError:
#     duplicate argument name) while its body reads the original parameter
#     names (`vocab_size`, ...) that no longer exist;
#   * repeated assignments to `a__` / `SCREAMING_SNAKE_CASE_` /
#     `_lowerCamelCase` shadow each other, so only the last binding survives.
# Code is unchanged below; comments document the apparent intent only.
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


# Module logger (immediately shadowed by the archive map below — mangling artifact).
a__ : Any = logging.get_logger(__name__)

# Pretrained checkpoint name -> config URL.
a__ : Optional[Any] = {
    '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class snake_case(__lowerCamelCase):
    """simple docstring"""

    # Apparently model_type / keys_to_ignore_at_inference / attribute_map.
    SCREAMING_SNAKE_CASE_ : Dict = "gpt_neo"
    SCREAMING_SNAKE_CASE_ : Optional[int] = ["past_key_values"]
    SCREAMING_SNAKE_CASE_ : List[Any] = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self: Union[str, Any],
        __A: Union[str, Any] = 5_0_2_5_7,                     # vocab_size
        __A: Any = 2_0_4_8,                                   # max_position_embeddings
        __A: Optional[Any] = 2_0_4_8,                         # hidden_size
        __A: Any = 2_4,                                       # num_layers
        __A: Union[str, Any] = [[["global", "local"], 1_2]],  # attention_types
        __A: str = 1_6,                                       # num_heads
        __A: Optional[int] = None,                            # intermediate_size
        __A: Union[str, Any] = 2_5_6,                         # window_size
        __A: Any = "gelu_new",                                # activation_function
        __A: Dict = 0.0,                                      # resid_dropout
        __A: Optional[int] = 0.0,                             # embed_dropout
        __A: int = 0.0,                                       # attention_dropout
        __A: List[str] = 0.1,                                 # classifier_dropout
        __A: Any = 1e-5,                                      # layer_norm_epsilon
        __A: int = 0.02,                                      # initializer_range
        __A: List[str] = True,                                # use_cache
        __A: Tuple = 5_0_2_5_6,                               # bos_token_id
        __A: Optional[Any] = 5_0_2_5_6,                       # eos_token_id
        **__A: Optional[Any],
    ):
        # NOTE(review): the right-hand names below are undefined in this scope
        # (mangling removed the parameters they referred to).
        __UpperCamelCase = vocab_size
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_layers
        __UpperCamelCase = num_heads
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = window_size
        __UpperCamelCase = activation_function
        __UpperCamelCase = resid_dropout
        __UpperCamelCase = embed_dropout
        __UpperCamelCase = attention_dropout
        __UpperCamelCase = classifier_dropout
        __UpperCamelCase = layer_norm_epsilon
        __UpperCamelCase = initializer_range
        __UpperCamelCase = use_cache
        __UpperCamelCase = bos_token_id
        __UpperCamelCase = eos_token_id
        __UpperCamelCase = attention_types
        __UpperCamelCase = self.expand_attention_types_params(__A)
        # Sanity check: the expanded per-layer attention list must match num_layers.
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                f'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.'
            )
        super().__init__(bos_token_id=__A, eos_token_id=__A, **__A)

    @staticmethod
    def _lowerCamelCase(__A: Tuple):
        # Apparently expand_attention_types_params: flatten [[patterns, count], ...]
        # into one attention-type entry per layer.
        __UpperCamelCase = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def lowercase__(__lowercase: Tuple, __lowercase: Any, __lowercase: Union[str, Any], __lowercase: List[str]) -> Any:
    """simple docstring"""
    # Apparently custom_unfold(input, dimension, size, step): an ONNX-exportable
    # re-implementation of torch.Tensor.unfold.
    # NOTE(review): duplicate `__lowercase` parameters = SyntaxError; body reads
    # the lost originals (`input`, `shape`, `dimension`, ...).
    import torch

    __UpperCamelCase = input.size()
    __UpperCamelCase = len(__lowercase)
    __UpperCamelCase = shape[dimension]
    __UpperCamelCase = torch.arange(0, __lowercase, __lowercase)
    __UpperCamelCase = torch.div(sizedim - size, __lowercase, rounding_mode='floor') + 1
    __UpperCamelCase = torch.arange(__lowercase) + low_indices[:min_length][:, None]
    __UpperCamelCase = [slice(__lowercase)] * rank
    __UpperCamelCase = indices
    __UpperCamelCase = input[s]
    __UpperCamelCase = list(range(0, rank + 1))
    # Move the unfolded window axis to the end, mirroring Tensor.unfold.
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(__lowercase)


def lowercase__(__lowercase: Union[str, Any], __lowercase: Optional[int]) -> Optional[int]:
    """simple docstring"""
    # Apparently custom_get_block_length_and_num_blocks(seq_length, window_size):
    # largest divisor usable as local-attention block length, plus block count.
    import torch

    __UpperCamelCase = torch.arange(1, __lowercase)
    __UpperCamelCase = torch.remainder(__lowercase, __lowercase)
    __UpperCamelCase = remainders == 0
    __UpperCamelCase = candidates[divisor_indices]
    __UpperCamelCase = torch.max(__lowercase)
    return largest_divisor, torch.div(__lowercase, __lowercase, rounding_mode='floor')


class snake_case(__lowerCamelCase):
    """simple docstring"""

    # Apparently GPTNeoOnnxConfig(OnnxConfigWithPast).

    @property
    def _lowerCamelCase(self: Tuple):
        # ONNX input spec: dynamic batch/sequence axes, plus past_key_values when enabled.
        __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(__A, direction='inputs')
            __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            __UpperCamelCase = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def _lowerCamelCase(self: int):
        # Number of attention heads, read from the wrapped model config.
        return self._config.num_heads

    def _lowerCamelCase(
        self: List[str],
        __A: PreTrainedTokenizer,
        __A: int = -1,
        __A: int = -1,
        __A: bool = False,
        __A: Optional[TensorType] = None,
    ):
        # Apparently generate_dummy_inputs: build ordered dummy inputs (with
        # zeroed past_key_values) for ONNX export.
        __UpperCamelCase = super(__A, self).generate_dummy_inputs(
            __A, batch_size=__A, seq_length=__A, is_pair=__A, framework=__A
        )
        # We need to order the input in the way they appears in the forward()
        __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                __UpperCamelCase, __UpperCamelCase = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                __UpperCamelCase = seqlen + 2
                __UpperCamelCase = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                __UpperCamelCase = [
                    (torch.zeros(__A), torch.zeros(__A)) for _ in range(self.num_layers)
                ]
        __UpperCamelCase = common_inputs['attention_mask']
        if self.use_past:
            # Extend the attention mask to cover the past sequence as well.
            __UpperCamelCase = ordered_inputs['attention_mask'].dtype
            __UpperCamelCase = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(__A, __A, dtype=__A)], dim=1
            )
        return ordered_inputs

    @property
    def _lowerCamelCase(self: Dict):
        # Default ONNX opset version.
        return 1_3
53
1
'''simple docstring''' def lowercase__ ( __lowercase : int = 10**9 ) -> int: """simple docstring""" __UpperCamelCase = 1 __UpperCamelCase = 2 __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __UpperCamelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(f'{solution() = }')
53
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class snake_case(PipelineTool):
    """Agent tool answering a natural-language question about a document image
    with a Donut (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA."""

    # FIX: the mangled original assigned every class attribute to the same name
    # (`SCREAMING_SNAKE_CASE_`) and every method to `_lowerCamelCase`, so the
    # PipelineTool machinery could not see any of them. The attribute and
    # method names the base class expects are restored.
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # FIX: `*__A, **__A` duplicated the parameter name (SyntaxError).
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build the Donut decoder prompt ids and the document pixel values."""
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt'
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy-generate the answer token sequence on the tool's device."""
        return self.model.generate(
            inputs['pixel_values'].to(self.device),
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip Donut control tokens and parse the generated answer."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        # FIX: was `tokenajson` (digit-mangled); the Donut processor API is `token2json`.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
53
1
'''simple docstring'''
# Processor for InstructBLIP: wraps a BLIP image processor, a language
# tokenizer, and a Q-Former tokenizer.
#
# NOTE(review): machine-mangled, NOT runnable as-is:
#   * several methods declare duplicate `__A` parameter names (SyntaxError);
#   * the base class `__lowerCamelCase` is undefined (presumably
#     ProcessorMixin — verify upstream);
#   * repeated `SCREAMING_SNAKE_CASE_` / `_lowerCamelCase` bindings shadow
#     each other, and bodies read original names (`qformer_tokenizer`, `text`,
#     `images`, `encoding`, ...) that the signatures no longer provide.
# Code is unchanged; comments document the apparent intent only.
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class snake_case(__lowerCamelCase):
    """simple docstring"""

    # Apparently attributes / image_processor_class / tokenizer_class.
    SCREAMING_SNAKE_CASE_ : Dict = ["image_processor", "tokenizer"]
    SCREAMING_SNAKE_CASE_ : Optional[int] = "BlipImageProcessor"
    SCREAMING_SNAKE_CASE_ : Optional[int] = "AutoTokenizer"

    def __init__(self: Dict, __A: Optional[int], __A: Union[str, Any], __A: Any):
        # Apparently (image_processor, tokenizer, qformer_tokenizer).
        super().__init__(__A, __A)
        # add QFormer tokenizer
        __UpperCamelCase = qformer_tokenizer

    def __call__(
        self: str,
        __A: ImageInput = None,                                                                      # images
        __A: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,   # text
        __A: bool = True,                                # add_special_tokens
        __A: Union[bool, str, PaddingStrategy] = False,  # padding
        __A: Union[bool, str, TruncationStrategy] = None,  # truncation
        __A: Optional[int] = None,                       # max_length
        __A: int = 0,                                    # stride
        __A: Optional[int] = None,                       # pad_to_multiple_of
        __A: Optional[bool] = None,                      # return_attention_mask
        __A: bool = False,                               # return_overflowing_tokens
        __A: bool = False,                               # return_special_tokens_mask
        __A: bool = False,                               # return_offsets_mapping
        __A: bool = False,                               # return_token_type_ids
        __A: bool = False,                               # return_length
        __A: bool = True,                                # verbose
        __A: Optional[Union[str, TensorType]] = None,    # return_tensors
        **__A: List[Any],
    ):
        # Tokenize `text` with both tokenizers and preprocess `images`, merging
        # everything into one BatchFeature.
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')
        __UpperCamelCase = BatchFeature()
        if text is not None:
            __UpperCamelCase = self.tokenizer(
                text=__A,
                add_special_tokens=__A,
                padding=__A,
                truncation=__A,
                max_length=__A,
                stride=__A,
                pad_to_multiple_of=__A,
                return_attention_mask=__A,
                return_overflowing_tokens=__A,
                return_special_tokens_mask=__A,
                return_offsets_mapping=__A,
                return_token_type_ids=__A,
                return_length=__A,
                verbose=__A,
                return_tensors=__A,
                **__A,
            )
            encoding.update(__A)
            __UpperCamelCase = self.qformer_tokenizer(
                text=__A,
                add_special_tokens=__A,
                padding=__A,
                truncation=__A,
                max_length=__A,
                stride=__A,
                pad_to_multiple_of=__A,
                return_attention_mask=__A,
                return_overflowing_tokens=__A,
                return_special_tokens_mask=__A,
                return_offsets_mapping=__A,
                return_token_type_ids=__A,
                return_length=__A,
                verbose=__A,
                return_tensors=__A,
                **__A,
            )
            # Rename Q-Former outputs so they don't clash with the main tokenizer's.
            __UpperCamelCase = qformer_text_encoding.pop('input_ids')
            __UpperCamelCase = qformer_text_encoding.pop('attention_mask')
        if images is not None:
            __UpperCamelCase = self.image_processor(__A, return_tensors=__A)
            encoding.update(__A)
        return encoding

    def _lowerCamelCase(self: List[str], *__A: Dict, **__A: Dict):
        # Apparently batch_decode — forwarded to the language tokenizer.
        return self.tokenizer.batch_decode(*__A, **__A)

    def _lowerCamelCase(self: Optional[Any], *__A: Union[str, Any], **__A: Optional[int]):
        # Apparently decode — forwarded to the language tokenizer.
        return self.tokenizer.decode(*__A, **__A)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def _lowerCamelCase(self: Optional[Any]):
        # De-duplicated union of both processors' expected input names.
        __UpperCamelCase = self.tokenizer.model_input_names
        __UpperCamelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def _lowerCamelCase(self: Union[str, Any], __A: Union[str, Any], **__A: Dict):
        # Apparently save_pretrained: save the Q-Former tokenizer into a
        # subfolder, then delegate to the base implementation.
        if os.path.isfile(__A):
            raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(__A, exist_ok=__A)
        __UpperCamelCase = os.path.join(__A, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(__A)
        return super().save_pretrained(__A, **__A)

    @classmethod
    def _lowerCamelCase(cls: List[Any], __A: int, **__A: Dict):
        # Apparently from_pretrained: load the Q-Former tokenizer from its
        # subfolder and append it to the standard constructor arguments.
        __UpperCamelCase = AutoTokenizer.from_pretrained(__A, subfolder='qformer_tokenizer')
        __UpperCamelCase = cls._get_arguments_from_pretrained(__A, **__A)
        args.append(__A)
        return cls(*__A)
53
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
1
'''simple docstring'''
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: make `func` return its wall-clock duration in seconds.

    FIX: the mangled original named the inner `*args, **kwargs` identically
    (a SyntaxError) and assigned `func.__name__` to a throwaway local instead
    of preserving the wrapped function's name.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Create `num_examples` random examples matching a `datasets` feature dict.

    `seq_shapes` maps column name -> shape for Sequence features.
    Returns a list of ``(index, example)`` tuples.

    FIX: the mangled original's duplicate `__lowercase` parameters were a
    SyntaxError and its body read parameter names that no longer existed.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                # Fixed-shape array feature: random floats cast to the declared dtype.
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested sequences down to the leaf feature for its dtype.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write `num_examples` random examples to `dataset_path` (Arrow) and load
    the result back as a `datasets.Dataset`.

    FIX: the mangled original called `generate_examples`, a name it never
    defined (all three functions were renamed to `lowercase__`).
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

    num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.'''
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset


# Backward-compatible binding: in the mangled original all three functions were
# named `lowercase__`, so that module-level name resolved to the last one.
lowercase__ = generate_example_dataset
53
'''simple docstring'''
# Parquet reader/writer glue between `datasets` and pyarrow.parquet.
#
# NOTE(review): machine-mangled, NOT runnable as-is — several methods declare
# duplicate `__A` parameters (SyntaxError), the reader class inherits from the
# undefined name `__lowerCamelCase` (presumably AbstractDatasetReader — verify
# upstream), and bodies read names the signatures no longer provide
# (`path_or_paths`, `dataset`, `batch_size`, ...). Code is unchanged; comments
# document the apparent intent only.
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def lowercase__(__lowercase: Features) -> Optional[int]:
    """simple docstring"""
    # Apparently get_writer_batch_size(features): pick the smallest parquet
    # row-group size implied by the feature types (image/audio/binary columns
    # get smaller row groups so random access stays cheap). Returns None when
    # no such feature is present.
    __UpperCamelCase = np.inf

    def set_batch_size(__lowercase: FeatureType) -> None:
        nonlocal batch_size
        # NOTE(review): the isinstance checks were presumably against Image /
        # Audio / Value before mangling collapsed both arguments to one name.
        if isinstance(__lowercase, __lowercase):
            __UpperCamelCase = min(__lowercase, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(__lowercase, __lowercase):
            __UpperCamelCase = min(__lowercase, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(__lowercase, __lowercase) and feature.dtype == "binary":
            __UpperCamelCase = min(__lowercase, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(__lowercase, __lowercase)
    return None if batch_size is np.inf else batch_size


class snake_case(__lowerCamelCase):
    """simple docstring"""

    # Apparently ParquetDatasetReader.

    def __init__(
        self: List[str],
        __A: NestedDataStructureLike[PathLike],  # path_or_paths
        __A: Optional[NamedSplit] = None,        # split
        __A: Optional[Features] = None,          # features
        __A: str = None,                         # cache_dir
        __A: bool = False,                       # keep_in_memory
        __A: bool = False,                       # streaming
        __A: Optional[int] = None,               # num_proc
        **__A: Dict,
    ):
        super().__init__(
            __A,
            split=__A,
            features=__A,
            cache_dir=__A,
            keep_in_memory=__A,
            streaming=__A,
            num_proc=__A,
            **__A,
        )
        __UpperCamelCase = path_or_paths if isinstance(__A, __A) else {self.split: path_or_paths}
        __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1]
        __UpperCamelCase = Parquet(
            cache_dir=__A,
            data_files=__A,
            features=__A,
            hash=__A,
            **__A,
        )

    def _lowerCamelCase(self: Optional[int]):
        # Apparently read(): materialize either a streaming dataset or a fully
        # prepared map-style dataset.
        # Build iterable dataset
        if self.streaming:
            __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            __UpperCamelCase = None
            __UpperCamelCase = None
            __UpperCamelCase = None
            __UpperCamelCase = None
            self.builder.download_and_prepare(
                download_config=__A,
                download_mode=__A,
                verification_mode=__A,
                base_path=__A,
                num_proc=self.num_proc,
            )
            __UpperCamelCase = self.builder.as_dataset(
                split=self.split, verification_mode=__A, in_memory=self.keep_in_memory
            )
        return dataset


class snake_case:
    """simple docstring"""

    # Apparently ParquetDatasetWriter.

    def __init__(
        self: List[str],
        __A: Dataset,                    # dataset
        __A: Union[PathLike, BinaryIO],  # path_or_buf
        __A: Optional[int] = None,       # batch_size
        **__A: Dict,                     # parquet_writer_kwargs
    ):
        __UpperCamelCase = dataset
        __UpperCamelCase = path_or_buf
        __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features)
        __UpperCamelCase = parquet_writer_kwargs

    def _lowerCamelCase(self: Optional[int]):
        # Apparently write(): open the target (path or buffer) and delegate to
        # _write; returns the number of bytes written.
        __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                __UpperCamelCase = self._write(file_obj=__A, batch_size=__A, **self.parquet_writer_kwargs)
        else:
            __UpperCamelCase = self._write(file_obj=self.path_or_buf, batch_size=__A, **self.parquet_writer_kwargs)
        return written

    def _lowerCamelCase(self: List[str], __A: BinaryIO, __A: int, **__A: List[str]):
        # Apparently _write(file_obj, batch_size): stream the dataset to a
        # parquet file in batch_size-row slices.
        __UpperCamelCase = 0
        __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf', __A)
        __UpperCamelCase = self.dataset.features.arrow_schema
        __UpperCamelCase = pq.ParquetWriter(__A, schema=__A, **__A)
        for offset in logging.tqdm(
            range(0, len(self.dataset), __A),
            unit='ba',
            disable=not logging.is_progress_bar_enabled(),
            desc='Creating parquet from Arrow format',
        ):
            __UpperCamelCase = query_table(
                table=self.dataset._data,
                key=slice(__A, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(__A)
            written += batch.nbytes
        writer.close()
        return written
53
1
'''simple docstring'''
# Flax RoFormer model tests.
#
# NOTE(review): machine-mangled, NOT runnable as-is — the tester `__init__`
# declares many parameters all named `__A` (SyntaxError) while its body reads
# the original names (`parent`, `batch_size`, ...); the mixin class
# `FlaxRoFormerModelTester` is referenced but only the mangled name
# `snake_case` is defined; `__lowerCamelCase` (presumably
# FlaxModelTesterMixin) is undefined. Code is unchanged; comments document the
# apparent intent only.
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class snake_case(unittest.TestCase):
    """simple docstring"""

    # Apparently FlaxRoFormerModelTester: builds random configs/inputs.

    def __init__(
        self: Tuple,
        __A: Union[str, Any],       # parent
        __A: Tuple = 1_3,           # batch_size
        __A: int = 7,               # seq_length
        __A: Union[str, Any] = True,    # is_training
        __A: Tuple = True,          # use_attention_mask
        __A: Any = True,            # use_token_type_ids
        __A: Optional[Any] = True,  # use_labels
        __A: Union[str, Any] = 9_9,     # vocab_size
        __A: Dict = 3_2,            # hidden_size
        __A: Tuple = 5,             # num_hidden_layers
        __A: Union[str, Any] = 4,   # num_attention_heads
        __A: Union[str, Any] = 3_7,     # intermediate_size
        __A: Optional[int] = "gelu",    # hidden_act
        __A: str = 0.1,             # hidden_dropout_prob
        __A: Dict = 0.1,            # attention_probs_dropout_prob
        __A: str = 5_1_2,           # max_position_embeddings
        __A: Any = 1_6,             # type_vocab_size
        __A: int = 2,               # type_sequence_label_size
        __A: List[str] = 0.02,      # initializer_range
        __A: List[Any] = 4,         # num_choices
    ):
        __UpperCamelCase = parent
        __UpperCamelCase = batch_size
        __UpperCamelCase = seq_length
        __UpperCamelCase = is_training
        __UpperCamelCase = use_attention_mask
        __UpperCamelCase = use_token_type_ids
        __UpperCamelCase = use_labels
        __UpperCamelCase = vocab_size
        __UpperCamelCase = hidden_size
        __UpperCamelCase = num_hidden_layers
        __UpperCamelCase = num_attention_heads
        __UpperCamelCase = intermediate_size
        __UpperCamelCase = hidden_act
        __UpperCamelCase = hidden_dropout_prob
        __UpperCamelCase = attention_probs_dropout_prob
        __UpperCamelCase = max_position_embeddings
        __UpperCamelCase = type_vocab_size
        __UpperCamelCase = type_sequence_label_size
        __UpperCamelCase = initializer_range
        __UpperCamelCase = num_choices

    def _lowerCamelCase(self: Union[str, Any]):
        # Apparently prepare_config_and_inputs: random ids/masks + config.
        __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        __UpperCamelCase = None
        if self.use_attention_mask:
            __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
        __UpperCamelCase = None
        if self.use_token_type_ids:
            __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        __UpperCamelCase = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=__A,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def _lowerCamelCase(self: str):
        # Apparently prepare_config_and_inputs_for_common: repack into
        # (config, inputs_dict).
        __UpperCamelCase = self.prepare_config_and_inputs()
        __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = config_and_inputs
        __UpperCamelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_flax
class snake_case(__lowerCamelCase, unittest.TestCase):
    """simple docstring"""

    # Common Flax model-test suite over every RoFormer head.
    SCREAMING_SNAKE_CASE_ : int = True
    SCREAMING_SNAKE_CASE_ : List[str] = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _lowerCamelCase(self: str):
        # Apparently setUp: instantiate the tester.
        __UpperCamelCase = FlaxRoFormerModelTester(self)

    @slow
    def _lowerCamelCase(self: Union[str, Any]):
        # Smoke-test loading every model class from the small pretrained checkpoint.
        for model_class_name in self.all_model_classes:
            __UpperCamelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=__A)
            __UpperCamelCase = model(np.ones((1, 1)))
            self.assertIsNotNone(__A)


@require_flax
class snake_case(unittest.TestCase):
    """simple docstring"""

    @slow
    def _lowerCamelCase(self: Dict):
        # Integration test: MLM logits for a fixed input must match reference values.
        __UpperCamelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        __UpperCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]])
        __UpperCamelCase = model(__A)[0]
        __UpperCamelCase = 5_0_0_0_0
        __UpperCamelCase = (1, 6, vocab_size)
        self.assertEqual(output.shape, __A)
        __UpperCamelCase = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], __A, atol=1e-4))
53
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def lowercase__ ( __lowercase : SplitDict ) -> int: """simple docstring""" __UpperCamelCase = split_dict._to_yaml_list() assert len(__lowercase ) == len(__lowercase ) __UpperCamelCase = SplitDict._from_yaml_list(__lowercase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump __UpperCamelCase = None # the split name of split_dict takes over the name of the split info object __UpperCamelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=__lowercase ), SplitInfo(dataset_name='my_dataset' )] ) def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" __UpperCamelCase = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
1
'''simple docstring'''
# Script: evaluate an ASR checkpoint on a speech dataset, reporting WER/CER.
#
# NOTE(review): machine-mangled, NOT runnable as-is — the first function and
# the inner `write_to_file` declare duplicate parameter names (SyntaxError),
# and every body reads original names (`args`, `result`, `batch`, `text`,
# `parser`, `main`) the mangling removed. The punctuation regex also contains
# a U+FFFD replacement character where some original character was lost —
# verify against the upstream script. Code is unchanged; comments document
# the apparent intent only.
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def lowercase__(__lowercase: Dataset, __lowercase: Dict[str, str]) -> Tuple:
    """simple docstring"""
    # Apparently log_results(result, args): compute WER/CER and write them —
    # and optionally every prediction/target pair — to text files.
    __UpperCamelCase = args.log_outputs
    __UpperCamelCase = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    __UpperCamelCase = load_metric('wer')
    __UpperCamelCase = load_metric('cer')
    # compute metrics
    __UpperCamelCase = wer.compute(references=result['target'], predictions=result['prediction'])
    __UpperCamelCase = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    __UpperCamelCase = f'''WER: {wer_result}\nCER: {cer_result}'''
    print(__lowercase)
    with open(f'''{dataset_id}_eval_results.txt''', 'w') as f:
        f.write(__lowercase)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        __UpperCamelCase = f'''log_{dataset_id}_predictions.txt'''
        __UpperCamelCase = f'''log_{dataset_id}_targets.txt'''
        with open(__lowercase, 'w') as p, open(__lowercase, 'w') as t:
            # mapping function to write output
            def write_to_file(__lowercase: int, __lowercase: int):
                p.write(f'''{i}''' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'''{i}''' + '\n')
                t.write(batch['target'] + '\n')

            result.map(__lowercase, with_indices=__lowercase)


def lowercase__(__lowercase: str) -> str:
    """simple docstring"""
    # Apparently normalize_text(text): lowercase, strip training-ignored
    # punctuation, and collapse whitespace/newlines.
    __UpperCamelCase = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    __UpperCamelCase = re.sub(__lowercase, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    __UpperCamelCase = ['\n\n', '\n', ' ', ' ']
    for t in token_sequences_to_ignore:
        __UpperCamelCase = ' '.join(text.split(__lowercase))
    return text


def lowercase__(__lowercase: Any) -> Union[str, Any]:
    """simple docstring"""
    # Apparently main(args): load dataset + feature extractor, resample audio,
    # run the ASR pipeline over every example, then log the metrics.
    __UpperCamelCase = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=__lowercase)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    __UpperCamelCase = AutoFeatureExtractor.from_pretrained(args.model_id)
    __UpperCamelCase = feature_extractor.sampling_rate
    # resample audio
    __UpperCamelCase = dataset.cast_column('audio', Audio(sampling_rate=__lowercase))
    # load eval pipeline
    if args.device is None:
        __UpperCamelCase = 0 if torch.cuda.is_available() else -1
    __UpperCamelCase = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(__lowercase: Dict):
        __UpperCamelCase = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        __UpperCamelCase = prediction['text']
        __UpperCamelCase = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    __UpperCamelCase = dataset.map(__lowercase, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(__lowercase, __lowercase)


if __name__ == "__main__":
    # CLI argument parsing (apparently `parser = argparse.ArgumentParser()`).
    a__ : Any = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
    )
    parser.add_argument(
        '''--dataset''',
        type=str,
        required=True,
        help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
    )
    parser.add_argument(
        '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
    )
    parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
    parser.add_argument(
        '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
    )
    parser.add_argument(
        '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
    )
    parser.add_argument(
        '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
    )
    parser.add_argument(
        '''--device''',
        type=int,
        default=None,
        help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
    )
    a__ : Any = parser.parse_args()
    main(args)
53
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[str] ={ '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any =[ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys a__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
1
'''simple docstring''' import random def lowercase__ ( __lowercase : list , __lowercase : Optional[Any] ) -> tuple: """simple docstring""" __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = [], [], [] for element in data: if element < pivot: less.append(__lowercase ) elif element > pivot: greater.append(__lowercase ) else: equal.append(__lowercase ) return less, equal, greater def lowercase__ ( __lowercase : list , __lowercase : int ) -> Dict: """simple docstring""" if index >= len(__lowercase ) or index < 0: return None __UpperCamelCase = items[random.randint(0 , len(__lowercase ) - 1 )] __UpperCamelCase = 0 __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = _partition(__lowercase , __lowercase ) __UpperCamelCase = len(__lowercase ) __UpperCamelCase = len(__lowercase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__lowercase , __lowercase ) # must be in larger else: return quick_select(__lowercase , index - (m + count) )
53
"""Speech feature extractor producing Kaldi-compliant log-mel fbank features
with optional per-utterance cepstral mean/variance normalization (CMVN)."""
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


# Restored: the garbled version bound the logger to `a__`, breaking the
# `logger.warning(...)` call in `__call__`.
logger = logging.get_logger(__name__)


class snake_case(SequenceFeatureExtractor):
    """Extracts fbank features from raw mono waveforms via torchaudio's Kaldi
    compliance layer, pads them, and (optionally) applies utterance-level CMVN.

    Method names restored to match their call sites (`_extract_fbank_features`,
    `utterance_cmvn`, `normalize`), which the garbled `_lowerCamelCase` names broke.
    """

    # Keys this extractor emits in its BatchFeature output.
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 80,
        sampling_rate: int = 16000,
        num_mel_bins: int = 80,
        padding_value: float = 0.0,
        do_ceptral_normalize: bool = True,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # CMVN needs true (unpadded) lengths, so always request an attention mask.
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """Compute Kaldi-compliant fbank features for a single mono waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Mean/variance-normalize `x` using statistics over its first
        `input_length` frames, then reset padded frames to `padding_value`."""
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        # Restored: the garbled version assigned `padding_value` to a throwaway
        # local instead of writing it into the padded tail of `x`.
        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32 (np.floataa was not a real dtype)
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        """Apply utterance CMVN to each feature matrix; the attention mask, when
        given, supplies each utterance's true length."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one utterance or a batch of utterances.

        Raises ValueError on sampling-rate mismatch or multi-channel input;
        returns a padded BatchFeature with "input_features" (+ "attention_mask").
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            # assumes float64 input should be down-cast — mirrors the upstream
            # Speech2Text extractor; TODO confirm against the original file
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
53
1
"""Perceiver model configuration and its ONNX export configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    """Configuration for Perceiver models.

    NOTE: class/attribute names restored — the garbled version named both
    classes in this file `snake_case` (the second shadowed the first) and
    renamed `model_type` to `SCREAMING_SNAKE_CASE_`, which breaks
    `PretrainedConfig` serialization and AutoConfig dispatch.
    """

    # Identifies this config for `AutoConfig` and (de)serialization.
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=None,
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes — default materialized here to avoid a mutable default argument
        self.train_size = train_size if train_size is not None else [368, 496]
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape if output_shape is not None else [1, 16, 224, 224]


class PerceiverOnnxConfig(OnnxConfig):
    """ONNX export configuration for Perceiver.

    Property/method names restored to the hook names `OnnxConfig` dispatches on
    (`inputs`, `atol_for_validation`, `generate_dummy_inputs`).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy text or image inputs for tracing, renaming the model's
        primary input to "inputs" as Perceiver expects."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('input_ids')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.'
            )


# Backward-compat alias: `snake_case` previously resolved to the second class.
snake_case = PerceiverOnnxConfig
53
"""AltCLIP model configurations (text tower, vision tower, and combined)."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Restored: the garbled version bound both the logger and the archive map to
# `a__`, so the later `logger.warning`/`logger.info` calls referenced a dict.
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (XLM-R-style).

    NOTE: class names restored — all three classes here were garbled to
    `snake_case`, while `AltCLIPConfig.__init__` instantiates
    `AltCLIPTextConfig`/`AltCLIPVisionConfig` by these names (NameError before).
    """

    # Identifies this config for `AutoConfig` and (de)serialization.
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (CLIP ViT-style)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a full AltCLIP config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type') == "altclip":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Combined AltCLIP configuration holding one text and one vision config."""

    model_type = "altclip"
    # Tells `PretrainedConfig` this config is composed of sub-configs.
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop('text_config_dict', None)
        vision_config_dict = kwargs.pop('vision_config_dict', None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict['id2label'].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.')

        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.')

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Build a combined config from already-constructed sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


# Backward-compat alias: `snake_case` previously resolved to the last class.
snake_case = AltCLIPConfig
53
1