code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: lowerCamelCase : List[Any] = None lowerCamelCase : Optional[Any] = logging.get_logger(__name__) lowerCamelCase : List[str] = '''▁''' lowerCamelCase : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase : Union[str, Any] = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}, '''tokenizer_file''': { '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json''' }, } lowerCamelCase : Any = { '''google/pegasus-xsum''': 5_12, } class lowerCAmelCase ( __a ): '''simple docstring''' _A : int = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Tuple = PegasusTokenizer _A : Union[str, Any] = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[int] , __a : List[Any]=None , __a : Optional[Any]=None , __a : Optional[int]="<pad>" , __a : List[Any]="</s>" , __a : int="<unk>" , __a : Dict="<mask_2>" , __a : Tuple="<mask_1>" , __a : str=None , __a : int=103 , **__a : List[Any] , ) -> int: """simple docstring""" __lowercase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(__a , __a ): raise TypeError( F"additional_special_tokens should be of type {type(__a )}, but is" F" {type(__a )}" ) __lowercase : int = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ 
F"<unk_{i}>" for i in range(len(__a ) , self.offset - 1 ) ] if len(set(__a ) ) != len(__a ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" F" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." ) __lowercase : Optional[Any] = additional_special_tokens_extended else: __lowercase : Optional[Any] = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset )] super().__init__( __a , tokenizer_file=__a , pad_token=__a , eos_token=__a , unk_token=__a , mask_token=__a , mask_token_sent=__a , offset=__a , additional_special_tokens=__a , **__a , ) __lowercase : str = vocab_file __lowercase : str = False if not self.vocab_file else True def lowerCAmelCase ( self : List[Any] , __a : List[str] ) -> str: """simple docstring""" __lowercase : Optional[Any] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( """There should be 3 special tokens: mask_token, pad_token, and eos_token +""" F" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" ) return [1 if x in all_special_ids else 0 for x in seq] def lowerCAmelCase ( self : Optional[Any] , __a : List , __a : Optional[List] = None , __a : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(__a ) elif token_ids_a is None: return self._special_token_mask(__a ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowerCAmelCase ( self : Any , __a : List[Any] , __a : str=None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API 
consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowerCAmelCase ( self : List[Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__a ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __lowercase : Union[str, Any] = os.path.join( __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ): copyfile(self.vocab_file , __a ) return (out_vocab_file,)
366
from ...processing_utils import ProcessorMixin class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''image_processor''', '''feature_extractor'''] _A : List[Any] = '''TvltImageProcessor''' _A : Optional[int] = '''TvltFeatureExtractor''' def __init__( self : str , __a : List[Any] , __a : Tuple ) -> Optional[Any]: """simple docstring""" super().__init__(image_processor=__a , feature_extractor=__a ) __lowercase : Union[str, Any] = image_processor __lowercase : Tuple = feature_extractor def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict: """simple docstring""" if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) __lowercase : Tuple = None if images is not None: __lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a ) if images_mixed is not None: __lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a ) if audio is not None: __lowercase : Optional[Any] = self.feature_extractor( __a , *__a , sampling_rate=__a , mask_audio=__a , **__a ) __lowercase : Tuple = {} if audio is not None: output_dict.update(__a ) if images is not None: output_dict.update(__a ) if images_mixed_dict is not None: output_dict.update(__a ) return output_dict @property def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase : int = self.image_processor.model_input_names __lowercase : Union[str, Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
306
0
import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] , __a : str , __a : int , __a : int ) -> Optional[Any]: """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""" ) __lowercase : Tuple = img __lowercase : Optional[int] = img.shape[1] __lowercase : Union[str, Any] = img.shape[0] __lowercase : Any = dst_width __lowercase : List[Any] = dst_height __lowercase : Tuple = self.src_w / self.dst_w __lowercase : Optional[Any] = self.src_h / self.dst_h __lowercase : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" for i in range(self.dst_h ): for j in range(self.dst_w ): __lowercase : List[Any] = self.img[self.get_y(__a )][self.get_x(__a )] def lowerCAmelCase ( self : Union[str, Any] , __a : int ) -> int: """simple docstring""" return int(self.ratio_x * x ) def lowerCAmelCase ( self : Union[str, Any] , __a : int ) -> int: """simple docstring""" return int(self.ratio_y * y ) if __name__ == "__main__": lowerCamelCase : List[str] = 8_00, 6_00 lowerCamelCase : Dict = imread('''image_data/lena.jpg''', 1) lowerCamelCase : List[str] = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
367
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCAmelCase : '''simple docstring''' def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = parent __lowercase : int = batch_size __lowercase : Any = seq_length __lowercase : str = is_training __lowercase : str = use_input_mask __lowercase : Optional[int] = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = vocab_size __lowercase : int = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : Dict = num_attention_heads __lowercase : Any = intermediate_size __lowercase : Dict = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : List[str] = max_position_embeddings __lowercase : Union[str, Any] = type_vocab_size __lowercase : Dict = type_sequence_label_size __lowercase : Union[str, Any] = 
initializer_range __lowercase : List[Any] = num_labels __lowercase : str = num_choices __lowercase : Tuple = scope def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : int = None if self.use_input_mask: __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None __lowercase : Optional[Any] = None __lowercase : Tuple = None if self.use_labels: __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() __lowercase : str = model(__a , attention_mask=__a ) __lowercase : List[Any] = model(__a ) __lowercase : Optional[int] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowercase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase : Tuple = self.num_labels __lowercase : Any = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase : Any = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) : List[str] = config_and_inputs __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = False _A : Any = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _A : Optional[Any] = () _A : List[Any] = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) _A : Optional[Any] = True def 
lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = EsmModelTester(self ) __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase : Union[str, Any] = type self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : List[str] = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : List[str] = EsmEmbeddings(config=__a ) __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) __lowercase : int = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx ) 
self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : Optional[Any] = EsmEmbeddings(config=__a ) __lowercase : Optional[int] = torch.empty(2 , 4 , 30 ) __lowercase : Tuple = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @require_torch class lowerCAmelCase ( __a ): '''simple docstring''' @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase : List[str] = model(__a )[0] __lowercase : Union[str, Any] = 33 __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) __lowercase : List[Any] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) 
@slow def lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __lowercase : Any = model(__a )[0] # compare the actual values for a slice. __lowercase : int = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
306
0
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def snake_case_ ( lowerCAmelCase_ : Optional[int] ): return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def snake_case_ ( lowerCAmelCase_ : Tuple ): __lowercase : int = create_tensor(lowerCAmelCase_ ) __lowercase : str = gather(lowerCAmelCase_ ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def snake_case_ ( lowerCAmelCase_ : Optional[Any] ): __lowercase : List[str] = [state.process_index] __lowercase : int = gather_object(lowerCAmelCase_ ) assert len(lowerCAmelCase_ ) == state.num_processes, F"{gathered_obj}, {len(lowerCAmelCase_ )} != {state.num_processes}" assert gathered_obj == list(range(state.num_processes ) ), F"{gathered_obj} != {list(range(state.num_processes ) )}" def snake_case_ ( lowerCAmelCase_ : Optional[Any] ): __lowercase : List[str] = create_tensor(lowerCAmelCase_ ) __lowercase : str = broadcast(lowerCAmelCase_ ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def snake_case_ ( lowerCAmelCase_ : Any ): # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if 
state.is_main_process: __lowercase : Tuple = torch.arange(state.num_processes + 1 ).to(state.device ) else: __lowercase : str = torch.arange(state.num_processes ).to(state.device ) __lowercase : List[Any] = pad_across_processes(lowerCAmelCase_ ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def snake_case_ ( lowerCAmelCase_ : Optional[Any] ): # For now runs on only two processes if state.num_processes != 2: return __lowercase : Union[str, Any] = create_tensor(lowerCAmelCase_ ) __lowercase : List[Any] = reduce(lowerCAmelCase_ , """sum""" ) __lowercase : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ ), F"{reduced_tensor} != {truth_tensor}" def snake_case_ ( lowerCAmelCase_ : Tuple ): # For now runs on only two processes if state.num_processes != 2: return __lowercase : Optional[int] = create_tensor(lowerCAmelCase_ ) __lowercase : Tuple = reduce(lowerCAmelCase_ , """mean""" ) __lowercase : Optional[Any] = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ ), F"{reduced_tensor} != {truth_tensor}" def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): # For xla_spawn (TPUs) main() def snake_case_ ( ): __lowercase : Optional[int] = PartialState() state.print(F"State: {state}" ) state.print("""testing gather""" ) test_gather(lowerCAmelCase_ ) state.print("""testing gather_object""" ) test_gather_object(lowerCAmelCase_ ) state.print("""testing broadcast""" ) test_broadcast(lowerCAmelCase_ ) state.print("""testing pad_across_processes""" ) test_pad_across_processes(lowerCAmelCase_ ) state.print("""testing reduce_sum""" ) test_reduce_sum(lowerCAmelCase_ ) state.print("""testing reduce_mean""" ) test_reduce_mean(lowerCAmelCase_ ) if __name__ == "__main__": main()
368
def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : int = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def snake_case_ ( lowerCAmelCase_ : int = 5000 ): __lowercase : Optional[int] = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase_ )] for i, pentagonal_i in enumerate(lowerCAmelCase_ ): for j in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ): __lowercase : int = pentagonal_nums[j] __lowercase : Optional[int] = pentagonal_i + pentagonal_j __lowercase : Union[str, Any] = pentagonal_j - pentagonal_i if is_pentagonal(lowerCAmelCase_ ) and is_pentagonal(lowerCAmelCase_ ): return b return -1 if __name__ == "__main__": print(f'''{solution() = }''')
306
0
import json import os import torch from diffusers import UNetaDModel os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True) def snake_case_ ( lowerCAmelCase_ : str ): if hor == 128: __lowercase : Any = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") __lowercase : List[Any] = (32, 128, 256) __lowercase : Any = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: __lowercase : str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") __lowercase : int = (32, 64, 128, 256) __lowercase : int = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") __lowercase : List[str] = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" ) __lowercase : Any = model.state_dict() __lowercase : Union[str, Any] = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 65536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } __lowercase : Dict = UNetaDModel(**lowerCAmelCase_ ) print(F"length of state dict: {len(state_dict.keys() )}" ) print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" ) __lowercase : Any = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowercase : Union[str, Any] = state_dict.pop(lowerCAmelCase_ ) 
hf_value_function.load_state_dict(lowerCAmelCase_ ) torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" ) with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , """w""" ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) def snake_case_ ( ): __lowercase : List[Any] = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 65536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } __lowercase : Optional[Any] = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) __lowercase : int = model __lowercase : Union[str, Any] = UNetaDModel(**lowerCAmelCase_ ) print(F"length of state dict: {len(state_dict.keys() )}" ) print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" ) __lowercase : str = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): __lowercase : int = state_dict.pop(lowerCAmelCase_ ) hf_value_function.load_state_dict(lowerCAmelCase_ ) torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": unet(32) # unet(128) value_function()
369
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class lowerCAmelCase(SchedulerCommonTest):
    """Tests for DPMSolverSDEScheduler: config sweeps plus full denoising loops
    checked against hard-coded per-backend expected statistics."""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, updated with any keyword overrides."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Expected values differ per backend because of RNG/kernel differences.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Timesteps placed directly on the target device this time.
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
306
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class lowerCAmelCase(PipelineTool):
    """Agent tool that turns English text into speech with SpeechT5 + HiFi-GAN vocoder."""

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        """Fill in the default vocoder checkpoint before the base class loads everything."""
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize the text; fall back to a default x-vector when no speaker embedding is given."""
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            # Default voice: entry 7305 of the CMU ARCTIC x-vector dataset.
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, outputs):
        """Generate the raw spectrogram/waveform features from the encoded inputs."""
        with torch.no_grad():
            return self.model.generate_speech(**outputs)

    def decode(self, outputs):
        """Vocode the model output to an audible waveform on CPU."""
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
370
"""Evaluate a question-answering ONNX model with TensorRT on a SQuAD-style dataset."""
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model: ",
)
parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters
parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)

args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

# TRT expects a fixed (batch, sequence) input shape.
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """Run one TensorRT inference pass.

    Copies the batch to the device, executes the engine asynchronously, copies the
    start/end logits back, and returns ``((start_logits, end_logits), infer_seconds)``.
    """
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    """Tokenize validation examples into (possibly multiple) overlapping features per example."""
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    """Match start/end logits back to answer substrings of the original contexts."""
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        """Byte size of one engine binding (volume * element size)."""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
306
0
"""simple docstring""" import argparse from collections import defaultdict import yaml lowerCamelCase : Any = '''docs/source/en/_toctree.yml''' def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): __lowercase : Any = defaultdict(lowerCAmelCase_ ) __lowercase : Optional[int] = [] __lowercase : Union[str, Any] = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(lowerCAmelCase_ ) __lowercase : Optional[int] = new_doc_list __lowercase : str = [key for key, value in counts.items() if value > 1] __lowercase : Optional[Any] = [] for duplicate_key in duplicates: __lowercase : Tuple = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(lowerCAmelCase_ ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " """`docs/source/en/_toctree.yml` with different *Title* values. 
Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) __lowercase : Dict = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(lowerCAmelCase_ ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(lowerCAmelCase_ ) # Sort return overview_doc def snake_case_ ( lowerCAmelCase_ : Any=False ): with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f: __lowercase : Optional[int] = yaml.safe_load(f.read() ) # Get to the API doc __lowercase : str = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase : Dict = content[api_idx]["""sections"""] # Then to the model doc __lowercase : Dict = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 __lowercase : int = api_doc[scheduler_idx]["""sections"""] __lowercase : Optional[int] = clean_doc_toc(lowerCAmelCase_ ) __lowercase : int = False if new_scheduler_doc != scheduler_doc: __lowercase : Dict = True if overwrite: __lowercase : int = new_scheduler_doc if diff: if overwrite: __lowercase : Optional[Any] = api_doc with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(lowerCAmelCase_ , allow_unicode=lowerCAmelCase_ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def snake_case_ ( lowerCAmelCase_ : Dict=False ): with open(lowerCAmelCase_ , encoding="""utf-8""" ) as f: __lowercase : List[str] = yaml.safe_load(f.read() ) # Get to the API doc __lowercase : Any = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase : Optional[Any] = content[api_idx]["""sections"""] # Then to the model doc __lowercase : Tuple = 0 
while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 __lowercase : Tuple = False __lowercase : Union[str, Any] = api_doc[pipeline_idx]["""sections"""] __lowercase : Optional[int] = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: __lowercase : str = pipeline_doc["""section"""] __lowercase : Optional[Any] = clean_doc_toc(lowerCAmelCase_ ) if overwrite: __lowercase : Union[str, Any] = new_sub_pipeline_doc new_pipeline_docs.append(lowerCAmelCase_ ) # sort overall pipeline doc __lowercase : int = clean_doc_toc(lowerCAmelCase_ ) if new_pipeline_docs != pipeline_docs: __lowercase : List[Any] = True if overwrite: __lowercase : Optional[Any] = new_pipeline_docs if diff: if overwrite: __lowercase : List[str] = api_doc with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(lowerCAmelCase_ , allow_unicode=lowerCAmelCase_ ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCamelCase : Dict = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
371
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class lowerCAmelCase(PretrainedConfig):
    """Configuration class for an NLLB-MoE (mixture-of-experts seq2seq) model.

    Stores the hyper-parameters of the encoder/decoder stacks plus the
    MoE routing options.  Instantiating with no arguments yields the
    defaults below.  Token-id and encoder/decoder flags are forwarded to
    ``PretrainedConfig.__init__``.
    """

    # NOTE(review): the obfuscated original named all three attributes `_A`,
    # clobbering each other; `PretrainedConfig` machinery requires these names.
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """Build the config.

        The original (obfuscated) signature declared every parameter as
        ``__a`` — a SyntaxError — and assigned every value to one local;
        the parameter names here are reconstructed from the right-hand
        sides of those assignments, whose order fixes them uniquely.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
306
0
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCamelCase : int = logging.get_logger(__name__) @add_end_docstrings(__a ) class lowerCAmelCase ( __a ): '''simple docstring''' def __init__( self : Tuple , **__a : Dict ) -> str: """simple docstring""" super().__init__(**__a ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(__a ) def lowerCAmelCase ( self : Any , **__a : List[str] ) -> List[Any]: """simple docstring""" __lowercase : str = {} __lowercase : List[Any] = {} __lowercase : int = {} # preprocess args if "points_per_batch" in kwargs: __lowercase : str = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: __lowercase : Union[str, Any] = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: __lowercase : str = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: __lowercase : str = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: __lowercase : List[str] = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: __lowercase : List[str] = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: __lowercase : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: __lowercase : str = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: __lowercase : List[Any] = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: __lowercase : List[str] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: __lowercase : 
Optional[int] = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: __lowercase : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : int , __a : int , *__a : List[str] , __a : Dict=None , __a : int=None , **__a : Tuple ) -> Optional[int]: """simple docstring""" return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a ) def lowerCAmelCase ( self : int , __a : Tuple , __a : List[str]=64 , __a : int = 0 , __a : float = 512 / 1500 , __a : Optional[int] = 32 , __a : Optional[int] = 1 , ) -> Tuple: """simple docstring""" __lowercase : str = load_image(__a ) __lowercase : str = self.image_processor.size["""longest_edge"""] __lowercase : List[Any] = self.image_processor.generate_crop_boxes( __a , __a , __a , __a , __a , __a ) __lowercase : List[str] = self.image_processor(images=__a , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": __lowercase : Union[str, Any] = self.get_inference_context() with inference_context(): __lowercase : int = self._ensure_tensor_on_device(__a , device=self.device ) __lowercase : Union[str, Any] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) __lowercase : int = image_embeddings __lowercase : List[Any] = grid_points.shape[1] __lowercase : Optional[int] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. 
""" """To return all points at once, set points_per_batch to None""" ) for i in range(0 , __a , __a ): __lowercase : List[str] = grid_points[:, i : i + points_per_batch, :, :] __lowercase : str = input_labels[:, i : i + points_per_batch] __lowercase : Optional[int] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : List[Any]=0.88 , __a : Any=0.95 , __a : Any=0 , __a : List[Any]=1 , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = model_inputs.pop("""input_boxes""" ) __lowercase : Union[str, Any] = model_inputs.pop("""is_last""" ) __lowercase : str = model_inputs.pop("""original_sizes""" ).tolist() __lowercase : int = model_inputs.pop("""reshaped_input_sizes""" ).tolist() __lowercase : Optional[Any] = self.model(**__a ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __lowercase : str = model_outputs["""pred_masks"""] __lowercase : str = self.image_processor.post_process_masks( __a , __a , __a , __a , binarize=__a ) __lowercase : Optional[Any] = model_outputs["""iou_scores"""] __lowercase : int = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : List[str]=False , __a : str=False , __a : Any=0.7 , ) -> Any: """simple docstring""" __lowercase : Any = [] __lowercase : Tuple = [] __lowercase : Union[str, Any] = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) __lowercase : List[Any] = torch.cat(__a ) __lowercase : Tuple = torch.cat(__a ) __lowercase : Dict = 
self.image_processor.post_process_for_mask_generation( __a , __a , __a , __a ) __lowercase : Optional[Any] = defaultdict(__a ) for output in model_outputs: for k, v in output.items(): extra[k].append(__a ) __lowercase : Optional[Any] = {} if output_rle_mask: __lowercase : List[str] = rle_mask if output_bboxes_mask: __lowercase : Any = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
350
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase : Optional[Any] = { '''configuration_poolformer''': [ '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PoolFormerConfig''', '''PoolFormerOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = ['''PoolFormerFeatureExtractor'''] lowerCamelCase : Union[str, Any] = ['''PoolFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PoolFormerForImageClassification''', '''PoolFormerModel''', '''PoolFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
306
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : Tuple = logging.get_logger(__name__) def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Any=False ): __lowercase : List[str] = """backbone.""" if is_semantic else """""" __lowercase : Tuple = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ (F"{prefix}cls_token", """beit.embeddings.cls_token"""), 
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""), (F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""), (F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ("""mask_token""", """beit.embeddings.mask_token"""), ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) else: # layernorm + classification head rename_keys.extend( [ ("""fc_norm.weight""", """beit.pooler.layernorm.weight"""), ("""fc_norm.bias""", """beit.pooler.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Tuple=False ): for i in range(config.num_hidden_layers ): __lowercase : Optional[Any] = """backbone.""" if is_semantic else """""" # queries, keys and values __lowercase : List[str] = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" ) __lowercase : Optional[int] = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" ) __lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" ) __lowercase : List[Any] = in_proj_weight[ : config.hidden_size, : ] __lowercase : int = q_bias __lowercase : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowercase : Tuple = in_proj_weight[ -config.hidden_size :, : ] __lowercase : Union[str, Any] = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained __lowercase : Union[str, Any] = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" ) __lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" ) __lowercase : Dict = gamma_a __lowercase : Any = gamma_a def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ): __lowercase : 
Optional[int] = dct.pop(lowerCAmelCase_ ) __lowercase : Optional[int] = val def snake_case_ ( ): __lowercase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg""" __lowercase : Optional[int] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=False ): __lowercase : int = False if """rvlcdip""" in checkpoint_url else True __lowercase : Optional[Any] = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: __lowercase : Tuple = 1024 __lowercase : str = 4096 __lowercase : List[Any] = 24 __lowercase : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: __lowercase : int = 16 __lowercase : Dict = """huggingface/label-files""" __lowercase : List[str] = """rvlcdip-id2label.json""" __lowercase : Union[str, Any] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) ) __lowercase : List[Any] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} __lowercase : Tuple = idalabel __lowercase : Optional[int] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys __lowercase : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""] __lowercase : Tuple = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ ) for src, dest in rename_keys: rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ ) # load HuggingFace model __lowercase : List[Any] = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ ) model.eval() model.load_state_dict(lowerCAmelCase_ ) # Check outputs on an image __lowercase : 
int = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ ) __lowercase : Optional[int] = prepare_img() __lowercase : str = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ) __lowercase : Optional[Any] = encoding["""pixel_values"""] __lowercase : int = model(lowerCAmelCase_ ) __lowercase : Tuple = outputs.logits # verify logits __lowercase : List[str] = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected" Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(lowerCAmelCase_ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: if has_lm_head: __lowercase : Optional[int] = """dit-base""" if """base""" in checkpoint_url else """dit-large""" else: __lowercase : str = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip""" image_processor.push_to_hub( repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , ) model.push_to_hub( repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , ) if __name__ == "__main__": lowerCamelCase : str = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) lowerCamelCase : 
List[str] = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
351
from __future__ import annotations def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : List[str] = 2 __lowercase : Union[str, Any] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(lowerCAmelCase_ ) if n > 1: factors.append(lowerCAmelCase_ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
306
0
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : Union[str, Any] = [ '''word_embeddings_layernorm.weight''', '''word_embeddings_layernorm.bias''', '''input_layernorm.weight''', '''input_layernorm.bias''', '''post_attention_layernorm.weight''', '''post_attention_layernorm.bias''', '''self_attention.dense.bias''', '''mlp.dense_4h_to_h.bias''', '''ln_f.weight''', '''ln_f.bias''', ] lowerCamelCase : Union[str, Any] = [ '''mlp.dense_4h_to_h.weight''', '''self_attention.dense.weight''', ] def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ): __lowercase : int = { """word_embeddings.weight""": """word_embeddings.weight""", """word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""", """word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""", """weight""": """ln_f.weight""", """bias""": """ln_f.bias""", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks __lowercase : int = int(re.match(r""".*layer_(\d*).*""" , lowerCAmelCase_ )[1] ) layer_number -= 3 return F"h.{layer_number}." + key def snake_case_ ( lowerCAmelCase_ : Tuple ): if dtype == torch.bool: return 1 / 8 __lowercase : Any = re.search(r"""[^\d](\d+)$""" , str(lowerCAmelCase_ ) ) if bit_search is None: raise ValueError(F"`dtype` is not a valid dtype: {dtype}." 
) __lowercase : str = int(bit_search.groups()[0] ) return bit_size // 8 def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ): # Construct model if bloom_config_file == "": __lowercase : Union[str, Any] = BloomConfig() else: __lowercase : List[str] = BloomConfig.from_json_file(lowerCAmelCase_ ) if shard_model: __lowercase : Any = os.listdir(lowerCAmelCase_ ) __lowercase : Optional[Any] = sorted(filter(lambda lowerCAmelCase_ : s.startswith("""layer""" ) and "model_00" in s , lowerCAmelCase_ ) ) __lowercase : Dict = {"""weight_map""": {}, """metadata""": {}} __lowercase : str = 0 __lowercase : str = None __lowercase : Any = BloomConfig() for j, file in enumerate(lowerCAmelCase_ ): print("""Processing file: {}""".format(lowerCAmelCase_ ) ) __lowercase : int = None for i in range(lowerCAmelCase_ ): # load all TP files __lowercase : Optional[Any] = file.replace("""model_00""" , F"model_0{i}" ) __lowercase : List[Any] = torch.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , map_location="""cpu""" ) # Rename keys in the transformers names __lowercase : str = list(temp.keys() ) for key in keys: __lowercase : List[str] = temp.pop(lowerCAmelCase_ ) if tensors is None: __lowercase : Union[str, Any] = temp else: for key in tensors.keys(): if any(key.endswith(lowerCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel __lowercase : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks __lowercase : Tuple = torch.cat([tensors[key], temp[key]] , dim=lowerCAmelCase_ ) 
# Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowerCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): __lowercase : Any = tensors[key] / pretraining_tp torch.save( lowerCAmelCase_ , os.path.join( lowerCAmelCase_ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(lowerCAmelCase_ ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): __lowercase : Optional[Any] = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: __lowercase : Any = """pytorch_model_{}-of-{}.bin""".format( str(j + 1 ).zfill(5 ) , str(len(lowerCAmelCase_ ) ).zfill(5 ) ) __lowercase : str = BloomConfig() __lowercase : Tuple = pytorch_dump_folder_path + """/""" + CONFIG_NAME __lowercase : Tuple = total_size with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) with open(os.path.join(lowerCAmelCase_ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f: __lowercase : Optional[int] = json.dumps(lowerCAmelCase_ , indent=2 , sort_keys=lowerCAmelCase_ ) + """\n""" f.write(lowerCAmelCase_ ) else: __lowercase : Tuple = BloomModel(lowerCAmelCase_ ) __lowercase : Tuple = os.listdir(lowerCAmelCase_ ) __lowercase : List[Any] = sorted(filter(lambda lowerCAmelCase_ : s.startswith("""layer""" ) and "model_00" in s , lowerCAmelCase_ ) ) __lowercase : Dict = None for i, file in enumerate(lowerCAmelCase_ ): __lowercase : Optional[int] = None for i in range(lowerCAmelCase_ ): # load all TP files __lowercase : int = file.replace("""model_00""" , F"model_0{i}" ) __lowercase : str = torch.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , map_location="""cpu""" ) # Rename keys in the transformers names __lowercase : Any = list(temp.keys() ) for key in keys: __lowercase : List[Any] = temp.pop(lowerCAmelCase_ ) if tensors is None: __lowercase : int = temp else: for key in tensors.keys(): # We average (sum and 
then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(lowerCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel __lowercase : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks __lowercase : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=lowerCAmelCase_ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(lowerCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): __lowercase : List[str] = tensors[key] / pretraining_tp __lowercase : Union[str, Any] = model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) assert not other_keys.unexpected_keys, F"The keys {other_keys.unexpected_keys} are unexpected" if missing_keys is None: __lowercase : List[Any] = set(other_keys.missing_keys ) else: __lowercase : Union[str, Any] = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F"The keys {missing_keys} are missing" # Save pytorch-model os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) __lowercase : int = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME __lowercase : Optional[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" ) if config.torch_dtype is not None: __lowercase : Union[str, Any] = model.to(config.torch_dtype ) torch.save(model.state_dict() , lowerCAmelCase_ ) print(F"Save configuration file to {pytorch_config_dump_path}" ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCamelCase : Tuple = argparse.ArgumentParser() # Required parameters 
parser.add_argument( '''--bloom_checkpoint_path''', default=None, type=str, required=True, help='''Path to the Megatron-LM checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--bloom_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--shard_model''', action='''store_true''', help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''', ) parser.add_argument( '''--pretraining_tp''', default=4, type=int, help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''', ) lowerCamelCase : Union[str, Any] = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
352
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __lowercase : List[str] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __lowercase : Optional[Any] = model(__a )["""last_hidden_state"""] __lowercase : Any = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __a ) # compare the actual values for a slice. __lowercase : Dict = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
306
0
# Tests for the Stable unCLIP image-to-image pipeline: fast CPU tests with tiny
# dummy components, plus slow GPU integration tests against recorded outputs.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    load_numpy,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)


enable_full_determinism()


class StableUnCLIPImgaImgPipelineFastTests(
    # NOTE(review): base-mixin order was mangled to `__a, __a, __a`; restored to
    # the three mixins imported above — confirm against test_pipelines_common.
    PipelineLatentTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineTesterMixin,
    unittest.TestCase,
):
    """Fast tests driving the pipeline with tiny randomly-initialized components."""

    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATch_PARAMS if False else TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build the full set of tiny pipeline components, seeding torch before
        each randomly-initialized module for determinism."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # image embedding and noise-level embedding are concatenated -> * 2
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        """Return deterministic call kwargs; optionally convert the random input
        tensor to a PIL image."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        """Pipeline must run when `image_embeds` is explicitly None."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        # only compare exact outputs on deterministic devices
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)


@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against reference outputs recorded on the Hub."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        # reference outputs were recorded in fp16 (see the .npy file name)
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        # prompt spelling ("turle") matches the recorded reference output
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
353
def snake_case_(a: str, b: str) -> bool:
    """Return True if ``b`` can be obtained from ``a`` by uppercasing some of
    ``a``'s lowercase letters and deleting the remaining lowercase letters
    (uppercase letters of ``a`` must all be matched, in order).

    >>> snake_case_("daBcd", "ABC")
    True
    >>> snake_case_("dBcd", "ABC")
    False
    >>> snake_case_("", "")
    True
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: the first i chars of `a` can produce the first j chars of `b`
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # uppercase a[i] to match b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # a[i] is lowercase, so it may be deleted
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
306
0
# Audio diffusion pipeline: generates mel-spectrogram images with a (optionally
# latent, via a VQ-VAE) diffusion model and converts them to audio with `Mel`.
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion.

    Args:
        vqvae: optional VQ-VAE used to run diffusion in latent space.
        unet: denoising UNet.
        mel: audio <-> mel-spectrogram-image converter.
        scheduler: DDIM or DDPM noise scheduler.
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNetaDConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Return the default number of inference steps (50 for DDIM, 1000 otherwise)."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Run denoising, optionally conditioned on an input audio slice
        (for in/out-painting via `mask_start_secs`/`mask_end_secs`), and return
        the generated images and audio.
        """
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            # map uint8 pixels to [-1, 1]
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                # start part-way through the diffusion from the noised input
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                # keep the (noised) input pixels in the masked border regions
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the DDIM sampling: recover the noisy latents that generate
        the given images (DDIM scheduler only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            # invert one DDIM step
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two (flattened) tensors.

        Args:
            x0: first tensor.
            x1: second tensor.
            alpha: interpolation weight in [0, 1].
        """
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
354
# `datasets` metric wrapping scipy.stats.spearmanr.
from scipy.stats import spearmanr

import datasets


# NOTE(review): these module constants were mangled to throwaway names although
# the decorator and `_info` below reference `_DESCRIPTION`/`_KWARGS_DESCRIPTION`/
# `_CITATION`; names restored and the flattened string layout re-wrapped.
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
'''

_CITATION = r'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation metric backed by `scipy.stats.spearmanr`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # spearmanr is symmetric in its two inputs
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
306
0
# Zero-shot image classification pipeline: scores an image against arbitrary
# candidate text labels with a CLIP-style image/text model.
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Predict, for an image, the most likely of a set of caller-supplied
    candidate labels by comparing image and text embeddings."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Assign labels to the image(s) passed as input.

        Args:
            images: a single image or batch of images (URL, local path,
                base64 string or PIL image).
            candidate_labels (`List[str]`): candidate labels for the image(s).
            hypothesis_template (`str`): template used to turn each label into
                the sentence that is embedded by the text encoder.
        """
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # one hypothesis sentence per candidate label
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            # a single label squeezes down to a scalar
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # highest score first
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
355
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt substring search: return True iff ``pattern``
    occurs in ``text``. Runs in O(len(pattern) + len(text))."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure array: failure[k] is the length of the longest
    proper prefix of pattern[:k+1] that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
306
0
# Tests for the DeBERTa (slow and fast) tokenizers.
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # attribute names grounded by the `self.tokenizer_class` /
    # `self.test_rust_tokenizer` / `self.rust_tokenizer_class` uses below
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        """Write a tiny BPE vocab/merges pair to a temp dir for the tests."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                'input_ids': [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                'token_type_ids': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                'attention_mask': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
356
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class lowerCAmelCase(BaseImageProcessor):
    """CLIP-style image processor.

    Pipeline (each step individually switchable): optional RGB conversion ->
    resize shortest edge -> center crop -> rescale to [0, 1] -> normalize with
    the OpenAI CLIP mean/std -> channel-dimension reordering.

    NOTE(review): parameter and attribute names were reconstructed from the
    obfuscated source (which used a duplicate `__a` name for every parameter
    and never assigned the config to `self`) — confirm against callers.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: resize shortest edge to 224 (aspect preserved), then crop 224x224.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the full preprocessing pipeline to one image or a batch.

        Every argument defaults to the value configured in `__init__` when not
        given. Returns a `BatchFeature` with key `"pixel_values"`.
        """
        # Per-call overrides fall back to the instance configuration.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Each enabled step needs its configuration to be present.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
306
0
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with the given model's tokenizer.

    Args:
        accelerator: used only to decide the padding strategy (TPU pads to a fixed length).
        batch_size: per-device batch size for both splits.
        model_name_or_path: tokenizer checkpoint.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Fine-tune a sequence-classification model on MRPC under Accelerate (optionally DeepSpeed).

    Tracks accuracy per epoch, optionally enforces `args.performance_lower_bound`,
    and dumps the per-epoch metrics to `<output_dir>/all_results.json` on the
    main process.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: DeepSpeed may own the optimizer, in which case we hand it a dummy.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (dummy when DeepSpeed manages it via its config).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
        performance_metric[F"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    """CLI entry point: parse arguments and launch `training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
357
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    """Sum all parameter values in `state_dict` (as a scalar tensor), used as a cheap checksum."""
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename keys from the original DALL-E encoder layout to the FLAVA codebook layout.

    Transformations: insert `.group.` after `group_N`, insert `.path.` after
    `res_path`, and map the trailing `.w`/`.b` suffixes to `.weight`/`.bias`.
    Values are cast to float32.
    """
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"{group_key}.", F"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a DALL-E encoder checkpoint into a `FlavaImageCodebook`.

    Args:
        checkpoint_path: local path or URL of the original checkpoint.
        pytorch_dump_folder_path: where to `save_pretrained` the converted model.
        config_path: optional HF config to load instead of the default.
        save_checkpoint: when False, return the converted state dict instead of saving.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Sanity check: the converted weights must sum to the same value as the originals.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
306
0
import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class CvtConfigTester(ConfigTester):
    """Config tester that additionally checks CvT-specific attributes exist."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    """Builds small CvT configs/inputs and checks model output shapes.

    NOTE(review): class and method names were reconstructed from obfuscated
    source in which every definition shared one name (so later defs shadowed
    earlier ones and the suite could not run).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Each stage shrinks the spatial dims by its conv patch-embedding stride.
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common modeling + pipeline tests for CvT."""

    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): the source had five boolean flags set to False; exact flag
    # names reconstructed — confirm against the mixin's expectations.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = CvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Covered by CvtConfigTester; kept so test_config's call resolves.
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
358
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """
    Copy the weights of an old-structure (XLM)ProphetNet checkpoint into the
    current `transformers` model structure and save the converted model.

    Args:
        prophetnet_checkpoint_path: Path of the official (old-structure) checkpoint.
            If the path contains "xprophetnet", the XLM variant is converted.
        pytorch_dump_folder_path: Output folder for the converted model.

    Raises:
        ValueError: if a missing key cannot be mapped onto the old model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # These projections are packed into a single `in_proj_weight` in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        # `lm_head` lives on the top-level model; everything else under `.prophetnet` / `.model`.
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the old model lacks the mapped attribute.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # q/k/v are stored as thirds of one packed projection in the old model.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # NOTE: these were bare (no-op) comparison expressions before; make them real checks.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level in both module trees.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


# Keep the old placeholder name callable for backward compatibility.
snake_case_ = convert_prophetnet_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
306
0
"""Utility that checks `# Copied from diffusers....` comments stay in sync with their source."""
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A block continues while lines are indented, empty, or close a multi-line signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` (dotted path inside diffusers)."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines)
            and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    """Return the leading indentation of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        # Wrap in a dummy class so black accepts indented method code.
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the list of differences, or rewrite the file when `overwrite` is True.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    """Check every `.py` file under the repo; raise (or fix, with `overwrite`) on stale copies."""
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
359
def snake_case_(lowerCAmelCase_: int = 200) -> int:
    """
    Count the ways `lowerCAmelCase_` pence can be made from standard British
    coins (1, 2, 5, 10, 20, 50, 100 and 200 pence), using any number of each.

    Classic bottom-up coin-change DP: processing one coin at a time keeps each
    multiset of coins counted exactly once.

    >>> snake_case_(5)
    4
    >>> snake_case_()
    73682
    """
    pence = lowerCAmelCase_
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


# The self-check below refers to `solution`, which was never defined; alias it.
solution = snake_case_

if __name__ == "__main__":
    assert solution(2_00) == 7_36_82
306
0
def _lowerCAmelCase ( lowerCAmelCase_ : int ): if number > 0: raise ValueError("""input must be a negative integer""" ) __lowercase : Optional[Any] = len(bin(lowerCAmelCase_ )[3:] ) __lowercase : int = bin(abs(lowerCAmelCase_ ) - (1 << binary_number_length) )[3:] __lowercase : Optional[int] = ( ( """1""" + """0""" * (binary_number_length - len(lowerCAmelCase_ )) + twos_complement_number ) if number < 0 else """0""" ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
360
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    """Builds small configs and random inputs for the TimmBackbone tests below."""

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage when no explicit out_indices are requested.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
306
0
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclidean algorithm: return (x, y) such that a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Solve x ≡ r1 (mod n1), x ≡ r2 (mod n2) for coprime moduli n1 and n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)  # x*n1 + y*n2 == 1
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    # Normalise into [0, m).
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Return the multiplicative inverse of `a` modulo `n` (a and n coprime).

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)  # b*a + x*n == 1, so b is a**-1 mod n
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same result as `chinese_remainder_theorem`, built from modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


# Preserve the placeholder name any external caller may still reference.
snake_case_ = chinese_remainder_theorem2

if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
361
"""Convert a fairseq Wav2Vec2-Conformer checkpoint to the Transformers format."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name (fragment) -> Transformers parameter name.
# A "*" is replaced at runtime by the encoder-layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live on the top-level HF model rather than under `wav2vec2_conformer.`.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` (a fairseq tensor named `full_name`) to the attribute path
    `key` of `hf_pointer`, on the sub-attribute selected by `weight_type`.

    Raises ValueError when the destination shape does not match `value.shape`.
    """
    # Walk down the dotted attribute path to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of `fairseq_model` into `hf_model`, logging any weight
    that could not be mapped. `is_headless` is accepted for interface parity
    with the finetuned/pretrained call sites.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The text before `key` ends with ".<layer_index>."; pick the index.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor into `feature_extractor`,
    or record it in `unused_weights` when it does not belong to a known slot.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq Wav2Vec2-Conformer checkpoint's weights into
    the Transformers design and save the result under `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
306
0
# Lazy import structure for the PoolFormer model, following the standard
# Transformers pattern: heavy submodules are only imported on first access.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

# Vision-only submodules: registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

# Torch-only submodules.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers (and IDEs) see the real eager imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
362
def hamming_distance(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3

    :raises ValueError: if the two strings differ in length.
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    # Count positions where the two strings disagree.
    return sum(char1 != char2 for char1, char2 in zip(string1, string2))


# Backward-compatible alias for the original (obfuscated) public name.
snake_case_ = hamming_distance

if __name__ == "__main__":
    import doctest

    doctest.testmod()
306
0
"""Hub push/pull and subfolder-loading tests for Flax models."""

import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

# assumed parallelism: 8
# NOTE(review): the original variable name was lost in obfuscation and nothing
# in this file reads it — confirm against the upstream test module.
lowerCamelCase = "0.12"


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Authenticate against the staging hub once for the whole class."""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """Best-effort cleanup of any repos created by the tests."""
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Round-trip a tiny model through the user namespace on the hub."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        """Same round-trip, but through an organization namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model_1, model_2):
    """Return True when every flattened parameter of the two Flax models matches
    to within an absolute-sum tolerance of 1e-4."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        """Loading must fail from the root dir but succeed with subfolder=."""
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        """Same as above, with a sharded checkpoint."""
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        """Subfolder loading straight from a hub repo."""
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        """Subfolder loading of a sharded checkpoint from a hub repo."""
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
363
"""Flax VisionTextDualEncoder tests: shared mixin, ViT+BERT and CLIP-vision+BERT
concrete suites, and a slow real-model integration test."""

import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    """Return x unchanged if it is iterable, otherwise the pair (x, x)."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    # Abstract hooks implemented by the concrete test classes below.
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """Fail unless the max elementwise difference between a and b is < tol."""
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        """Run the same inputs through PT and Flax twins (and their cross-loaded
        counterparts) and require outputs to match within 4e-2."""
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        """Check a real clip-italian checkpoint reproduces reference logits."""
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
306
0
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch

from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer

logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

# Registries mapping a checkpoint name to the model/tokenizer class used to load it.
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    """Parse command-line arguments for the BART + beam-search ONNX export."""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    """Load the model and tokenizer registered for `model_name` and move the model to `device`.

    Returns:
        (model, tokenizer) tuple.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # Disable generation features the scripted beam-search graph does not support.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """TorchScript the beam-search generator, export it to ONNX and check parity.

    Runs generation once in eager PyTorch and once through ONNX Runtime on the
    deduplicated graph, asserting the two outputs match within tolerance.
    """
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        # Reference output from the eager model; used below to validate the ONNX graph.
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    """Entry point: load the model, export it to ONNX and validate the export."""
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
364
# Scheduler registry for the diffusers package.
#
# Every scheduler implementation is imported here, grouped by the optional
# backend it needs (PyTorch, Flax, SciPy, torchsde).  Each group is guarded so
# that a missing backend swaps in dummy placeholder objects (which raise a
# helpful error on use) instead of failing the whole package import.
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)

# --- PyTorch-backed schedulers --------------------------------------------
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # PyTorch missing: expose dummy stand-ins with the same names.
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

# --- Flax-backed schedulers -----------------------------------------------
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

# --- Schedulers needing both PyTorch and SciPy ----------------------------
try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

# --- Schedulers needing both PyTorch and torchsde -------------------------
try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
306
0
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD

########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: The `Accelerator` object (used for process ordering and
            padding decisions based on distributed type / mixed precision).
        batch_size: Per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train BERT on MRPC with gradient accumulation + LocalSGD synchronization.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed command-line arguments (see `main`).
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
365
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm

# Tokens are whatever is left after splitting on non-identifier characters.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute a MinHash signature of a code file's tokens.

    Returns None for files with fewer than MIN_NUM_TOKENS tokens (too short
    to deduplicate meaningfully).
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split `code` on non-alphanumeric characters and return the set of non-empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    """MinHash-LSH index that groups near-duplicate files into clusters.

    Each cluster is keyed by the first file seen ("base") and collects every
    later file whose Jaccard similarity estimate exceeds the threshold.
    """

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add `code_key` (index, repo_name, path) to the index, clustering near-duplicates.

        If the key is close to an existing base, it joins that base's cluster;
        otherwise it becomes attached to the first close duplicate found.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return all clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the duplicate clusters to `filepath` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    """Worker: compute ((index, repo_name, path), MinHash) for one dataset row, or None if too short."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, MinHash) pairs for the dataset, computed in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Index the whole dataset and return its near-duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(codea: str, codeb: str) -> float:
    """Exact Jaccard similarity of the token sets of two code strings."""
    tokensa = get_tokens(codea)
    tokensb = get_tokens(codeb)
    return len(tokensa & tokensb) / len(tokensa | tokensb)


# Set by find_extremes() so pool workers can read the dataset without pickling it.
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one cluster to its "extremes": a minimal set of mutually dissimilar files.

    Each cluster element is either absorbed by an existing extreme (whose
    "copies" counter is bumped) or becomes a new extreme with copies=1.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute extremes for every cluster in parallel; returns a list of extreme-lists."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Remove near-duplicate files from `dataset`, keeping one representative per cluster.

    Returns:
        (filtered_dataset, duplicate_clusters) where each cluster element is
        annotated with "is_extreme" and, for extremes, its "copies" count.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
306
0
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the text-to-video synthesis pipeline with tiny dummy components."""

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build a tiny UNet3D / VAE / CLIP stack small enough to run on CPU."""
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs for `device` (MPS needs a CPU-seeded generator)."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the real damo-vilab text-to-video checkpoint."""

    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
366
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.

    Args:
        image_processor: An instance of the TVLT image processor (required input).
        feature_extractor: An instance of the TVLT audio feature extractor (required input).
    """

    # ProcessorMixin contract: names of the sub-processors and their class names.
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """
        Forward the `images` argument (and optional `images_mixed`) to the image processor and the `audio`
        argument to the feature extractor, merging their outputs into one dict. At least one of `images`
        or `audio` must be provided.

        Raises:
            ValueError: if both `images` and `audio` are None.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        # Merge: audio keys first, then image keys, then mixed-image keys.
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Union (order-preserving, de-duplicated) of both sub-processors' model input names."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
306
0
import copy
import unittest

from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMvaConfig,
        LayoutLMvaForQuestionAnswering,
        LayoutLMvaForSequenceClassification,
        LayoutLMvaForTokenClassification,
        LayoutLMvaModel,
    )
    from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class LayoutLMvaModelTester:
    """Builds a tiny LayoutLMv3 config plus random inputs for the model tests.

    Restored name: the test class below instantiates ``LayoutLMvaModelTester(self)``,
    so this class (previously mangled to ``lowerCAmelCase``) must carry that name.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number
        # of patches + 1 (we add 1 for the CLS token).
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinate 1 <= coordinate 3 and 0 <= 2.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        # Restored 8-way unpacking (the previous revision collapsed it to a
        # single name, leaving the inputs below undefined).
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for LayoutLMv3 (methods renamed back to ``test_*`` so
    unittest discovery actually runs them)."""

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # All pipeline tests are skipped for this model.
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO cats fixture image (restored name: the integration test
    below calls ``prepare_img()``)."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
367
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    """Builds a tiny ESM config plus random inputs for the model tests.

    Restored name: the test class below instantiates ``EsmModelTester(self)``,
    so this class (previously mangled to ``lowerCAmelCase``) must carry that name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        # Restored 6-way unpacking (the previous revision collapsed it to a
        # single name, leaving the inputs below undefined).
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ESM (methods renamed back to ``test_*`` so
    unittest discovery actually runs them)."""

    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Position ids should start at padding_idx + 1 and keep pads at padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    # NOTE(review): the original method name was lost in the mangling; this name
    # matches the common-test hook usually skipped with this message — confirm.
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
306
0
def snake_case_(lowerCAmelCase_: int = 200) -> int:
    """Project Euler 31: count the ways to make `lowerCAmelCase_` pence using
    standard British coins (1, 2, 5, 10, 20, 50, 100, 200).

    Classic unbounded-knapsack DP: for each coin, fold its contribution into
    every reachable total once, so each combination is counted exactly once
    regardless of coin order.

    Args:
        lowerCAmelCase_: target amount in pence (>= 0).

    Returns:
        Number of distinct coin combinations summing to the target.
    """
    pence = lowerCAmelCase_
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence (the empty combination)
    for coin in coins:
        # Start at `coin`: smaller totals cannot use this coin.
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert snake_case_(200) == 73682
368
def is_pentagonal(n: int) -> bool:
    """Return True if `n` is a pentagonal number.

    n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer
    (inverse of P(k) = k(3k - 1) / 2). Restored name: the solver below calls
    ``is_pentagonal``, but the previous revision defined this predicate under
    the same name as the solver, shadowing it.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def snake_case_(lowerCAmelCase_: int = 5000) -> int:
    """Project Euler 44: find the minimal D = |P_j - P_k| such that both the
    sum and the difference of the pentagonal pair (P_j, P_k) are pentagonal.

    Args:
        lowerCAmelCase_: number of pentagonal numbers to search (exclusive upper
            index); the default of 5000 is large enough to contain the answer.

    Returns:
        The difference D for the first qualifying pair, or -1 if none is found
        within the search limit.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, lowerCAmelCase_)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        # Inner loop starts at i so each unordered pair is tried once
        # (the previous revision started this range at the search limit).
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            pentagonal_sum = pentagonal_i + pentagonal_j
            pentagonal_diff = pentagonal_j - pentagonal_i
            if is_pentagonal(pentagonal_sum) and is_pentagonal(pentagonal_diff):
                return pentagonal_diff
    return -1


if __name__ == "__main__":
    print(f"{snake_case_() = }")
306
0
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class lowerCAmelCase(PretrainedConfig):
    """Configuration for the TrajectoryTransformer model.

    Fixes vs. the previous revision: all ``__init__`` parameters shared the
    same placeholder name (not valid Python) and the three class attributes
    were all assigned to one name so only the last survived; names are restored
    to match the identifiers the body assigns from.
    """

    # PretrainedConfig hooks: model identifier, inference-time key filtering,
    # and the mapping from canonical attribute names to this config's fields.
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        # Token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
369
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Unit tests for `DPMSolverSDEScheduler` (stochastic SDE sampler)."""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config; entries can be overridden via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        # Fix: the original assigned the dict to one name but updated/returned
        # another, so overrides were silently lost and a NameError followed.
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ per backend because the noise sampler is
        # seeded per device.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        # Fix: the original passed an undefined name where `True` was intended.
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
306
0
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    """
    MBart-50 tokenizer based on SentencePiece.

    Source sequences are formatted as ``[src_lang_code] X [eos]`` and target
    sequences as ``[tgt_lang_code] X [eos]`` (see
    `set_src_lang_special_tokens` / `set_tgt_lang_special_tokens`). The first
    four SentencePiece ids are remapped to match the fairseq vocabulary
    alignment, and the language codes plus ``<mask>`` live above the
    SentencePiece range at an offset.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word: include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # All language codes are registered as additional special tokens.
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the fairseq vocab and
        # position 3 in the spm vocab.
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        # The SentencePieceProcessor is not picklable; reload it on setstate.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # The SP model returns 0 for unknown pieces.
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens (strings for sub-words) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # Make sure special tokens are not decoded using the sentencepiece model.
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs as ``[src_lang_code] X [eos]`` (pairs are concatenated)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency.
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipelines to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source-language setting: [src_lang_code] + prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target-language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
370
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model: ",
)
parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters
parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)

args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

# Fix: the original read args.eval_batch_size without ever setting it.
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """Run one TRT inference pass; returns ((start_logits, end_logits), infer_time)."""
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at
# https://huggingface.co/datasets/ (the dataset will be downloaded automatically from the datasets Hub).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    """Tokenize validation examples into (possibly overlapping) model features."""
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    """Match start/end logits to answer spans in the original context and format for the metric."""
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
306
0
"""Depth-first topological sort of a small hard-coded DAG."""

# Adjacency list of the directed acyclic graph and the full vertex set.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a DFS-based topological sort starting from `start`.

    `visited` and `sort` are accumulated across recursive calls; the returned
    list contains every vertex, each appearing after all of its successors.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    # Guarded so importing this module does not print.
    sort = topological_sort("a", [], [])
    print(sort)
371
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase : str = { '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''', } class lowerCAmelCase ( __a ): '''simple docstring''' _A : int = '''nllb-moe''' _A : List[str] = ['''past_key_values'''] _A : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Dict , __a : List[str]=128112 , __a : List[Any]=1024 , __a : List[Any]=12 , __a : Union[str, Any]=4096 , __a : List[str]=16 , __a : int=12 , __a : Optional[int]=4096 , __a : str=16 , __a : List[Any]=0.05 , __a : Any=0.05 , __a : Dict=True , __a : Optional[Any]=True , __a : List[Any]="relu" , __a : Tuple=1024 , __a : Optional[Any]=0.1 , __a : Tuple=0.1 , __a : Any=0.0 , __a : Optional[Any]=0.02 , __a : List[str]=2 , __a : Union[str, Any]=True , __a : List[Any]=False , __a : Tuple="float32" , __a : Optional[int]=False , __a : Optional[int]=128 , __a : str=64 , __a : Dict=4 , __a : str=4 , __a : List[str]=0.001 , __a : List[Any]=0.001 , __a : Optional[Any]="all" , __a : Optional[int]=False , __a : int=False , __a : int=1.0 , __a : Dict=0.2 , __a : Tuple=1 , __a : Optional[Any]=0 , __a : List[Any]=2 , __a : Any=False , **__a : Any , ) -> Any: """simple docstring""" __lowercase : int = vocab_size __lowercase : List[Any] = max_position_embeddings __lowercase : Tuple = d_model __lowercase : str = encoder_ffn_dim __lowercase : List[str] = encoder_layers __lowercase : int = encoder_attention_heads __lowercase : List[Any] = decoder_ffn_dim __lowercase : int = decoder_layers __lowercase : Optional[int] = decoder_attention_heads __lowercase : Union[str, Any] = dropout __lowercase : str = attention_dropout __lowercase : Any = activation_dropout __lowercase : List[Any] = activation_function __lowercase : List[str] = init_std __lowercase : Optional[int] = 
encoder_layerdrop __lowercase : str = decoder_layerdrop __lowercase : Dict = use_cache __lowercase : Optional[Any] = encoder_layers __lowercase : str = scale_embedding # scale factor will be sqrt(d_model) if True __lowercase : List[Any] = router_z_loss_coef __lowercase : Tuple = router_aux_loss_coef __lowercase : str = decoder_sparse_step __lowercase : Any = encoder_sparse_step __lowercase : str = num_experts __lowercase : List[Any] = expert_capacity __lowercase : int = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" ) __lowercase : Optional[int] = router_dtype __lowercase : Any = router_ignore_padding_tokens __lowercase : Optional[Any] = batch_prioritized_routing __lowercase : str = second_expert_policy __lowercase : List[str] = normalize_router_prob_before_dropping __lowercase : List[Any] = moe_eval_capacity_token_fraction __lowercase : List[str] = moe_token_dropout __lowercase : Optional[Any] = output_router_logits super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
306
0
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union lowerCamelCase : Optional[int] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase : '''simple docstring''' _A : str _A : Optional[str] = None _A : Optional[Union[str, int]] = None _A : Optional[Union[str, int]] = None _A : Optional[Union[str, int]] = None def lowerCAmelCase ( self : Any ) -> Tuple: """simple docstring""" __lowercase : str = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[int] ) -> Tuple: """simple docstring""" return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}" @property def lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" return self.major, self.minor, self.patch def lowerCAmelCase ( self : Optional[Any] , __a : str ) -> Tuple: """simple docstring""" if isinstance(__a , __a ): return Version(__a ) elif isinstance(__a , __a ): return other raise TypeError(F"{other} (type {type(__a )}) cannot be compared to version." 
) def __eq__( self : Optional[Any] , __a : int ) -> Any: """simple docstring""" try: __lowercase : List[str] = self._validate_operand(__a ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : int , __a : List[Any] ) -> Any: """simple docstring""" __lowercase : List[str] = self._validate_operand(__a ) return self.tuple < other.tuple def __hash__( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def lowerCAmelCase ( cls : Optional[int] , __a : Union[str, Any] ) -> int: """simple docstring""" __lowercase : Optional[Any] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" return self.version_str def snake_case_ ( lowerCAmelCase_ : List[Any] ): __lowercase : Optional[Any] = _VERSION_REG.match(lowerCAmelCase_ ) if not res: raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." ) return tuple(int(lowerCAmelCase_ ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def snake_case_ ( lowerCAmelCase_ : List[Any] ): return ".".join(str(lowerCAmelCase_ ) for v in version_tuple )
350
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase : Optional[Any] = { '''configuration_poolformer''': [ '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PoolFormerConfig''', '''PoolFormerOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = ['''PoolFormerFeatureExtractor'''] lowerCamelCase : Union[str, Any] = ['''PoolFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PoolFormerForImageClassification''', '''PoolFormerModel''', '''PoolFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
306
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase : Any = tempfile.mkdtemp() # fmt: off __lowercase : Union[str, Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on __lowercase : Optional[int] = dict(zip(__a , range(len(__a ) ) ) ) __lowercase : Optional[Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] __lowercase : Optional[Any] = {"""unk_token""": """<unk>"""} __lowercase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__a ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__a ) ) __lowercase : Any = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } __lowercase : List[str] = os.path.join(self.tmpdirname , __a ) with 
open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__a , __a ) def lowerCAmelCase ( self : Optional[Any] , **__a : Union[str, Any] ) -> List[str]: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **__a ) def lowerCAmelCase ( self : Union[str, Any] , **__a : str ) -> Optional[Any]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **__a ) def lowerCAmelCase ( self : int , **__a : List[Any] ) -> Optional[Any]: """simple docstring""" return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" __lowercase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __lowercase : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Any = self.get_tokenizer() __lowercase : Union[str, Any] = self.get_rust_tokenizer() __lowercase : List[str] = self.get_image_processor() __lowercase : str = OwlViTProcessor(tokenizer=__a , image_processor=__a ) processor_slow.save_pretrained(self.tmpdirname ) __lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__a ) __lowercase : Optional[Any] = OwlViTProcessor(tokenizer=__a , image_processor=__a ) processor_fast.save_pretrained(self.tmpdirname ) __lowercase : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __a ) 
self.assertIsInstance(processor_fast.tokenizer , __a ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __a ) self.assertIsInstance(processor_fast.image_processor , __a ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __lowercase : Optional[Any] = self.get_image_processor(do_normalize=__a ) __lowercase : Optional[int] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : List[Any] = self.get_image_processor() __lowercase : str = self.get_tokenizer() __lowercase : Any = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : Any = self.prepare_image_inputs() __lowercase : Union[str, Any] = image_processor(__a , return_tensors="""np""" ) __lowercase : str = processor(images=__a , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" __lowercase : Optional[Any] = self.get_image_processor() __lowercase : Dict = self.get_tokenizer() __lowercase : Tuple = 
OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : int = """lower newer""" __lowercase : Optional[Any] = processor(text=__a , return_tensors="""np""" ) __lowercase : Optional[int] = tokenizer(__a , return_tensors="""np""" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = self.get_image_processor() __lowercase : Dict = self.get_tokenizer() __lowercase : Any = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : Any = """lower newer""" __lowercase : List[str] = self.prepare_image_inputs() __lowercase : Dict = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" __lowercase : Optional[int] = """google/owlvit-base-patch32""" __lowercase : Dict = OwlViTProcessor.from_pretrained(__a ) __lowercase : str = ["""cat""", """nasa badge"""] __lowercase : int = processor(text=__a ) __lowercase : Any = 16 self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" __lowercase : int = """google/owlvit-base-patch32""" __lowercase : Union[str, Any] = OwlViTProcessor.from_pretrained(__a ) __lowercase : Dict = [["""cat""", """nasa badge"""], ["""person"""]] __lowercase : List[str] = processor(text=__a ) __lowercase : int = 16 __lowercase : Tuple = len(__a ) __lowercase : List[Any] = max([len(__a ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) 
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : List[str] = """google/owlvit-base-patch32""" __lowercase : List[str] = OwlViTProcessor.from_pretrained(__a ) __lowercase : Optional[int] = ["""cat""", """nasa badge"""] __lowercase : List[str] = processor(text=__a ) __lowercase : Optional[int] = 16 __lowercase : str = inputs["""input_ids"""] __lowercase : Optional[int] = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.get_image_processor() __lowercase : Union[str, Any] = self.get_tokenizer() __lowercase : List[Any] = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : str = self.prepare_image_inputs() __lowercase : Dict = self.prepare_image_inputs() __lowercase : Any = processor(images=__a , query_images=__a ) self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase : List[str] = self.get_image_processor() __lowercase : str = self.get_tokenizer() __lowercase : List[str] = OwlViTProcessor(tokenizer=__a , image_processor=__a ) __lowercase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase : Optional[int] = processor.batch_decode(__a ) __lowercase : Any = 
tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a )
351
from __future__ import annotations def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : List[str] = 2 __lowercase : Union[str, Any] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(lowerCAmelCase_ ) if n > 1: factors.append(lowerCAmelCase_ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
306
0
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : Any = {'''vocab_file''': '''vocab.txt'''} lowerCamelCase : Dict = { '''vocab_file''': { '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''', }, } lowerCamelCase : Tuple = { '''openbmb/cpm-ant-10b''': 10_24, } def snake_case_ ( lowerCAmelCase_ : str ): __lowercase : str = collections.OrderedDict() with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as reader: __lowercase : Any = reader.readlines() for index, token in enumerate(lowerCAmelCase_ ): __lowercase : List[Any] = token.rstrip("""\n""" ) __lowercase : str = index return vocab class lowerCAmelCase ( __a ): '''simple docstring''' def __init__( self : str , __a : Union[str, Any] , __a : Dict="<unk>" , __a : Tuple=200 ) -> str: """simple docstring""" __lowercase : Optional[Any] = vocab __lowercase : List[Any] = unk_token __lowercase : List[Any] = max_input_chars_per_word def lowerCAmelCase ( self : Optional[Any] , __a : int ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = list(__a ) if len(__a ) > self.max_input_chars_per_word: return [self.unk_token] __lowercase : Union[str, Any] = 0 __lowercase : str = [] while start < len(__a ): __lowercase : str = len(__a ) __lowercase : List[str] = None while start < end: __lowercase : Dict = """""".join(chars[start:end] ) if substr in self.vocab: __lowercase : int = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__a ) __lowercase : str = end return sub_tokens class lowerCAmelCase ( __a ): '''simple docstring''' _A : Optional[Any] = VOCAB_FILES_NAMES _A : List[Any] = PRETRAINED_VOCAB_FILES_MAP _A : Union[str, 
Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Dict = ['''input_ids''', '''attention_mask'''] _A : Dict = False def __init__( self : Tuple , __a : List[str] , __a : List[str]="<d>" , __a : List[str]="</d>" , __a : Optional[int]="<s>" , __a : Tuple="</s>" , __a : Optional[int]="<pad>" , __a : Dict="<unk>" , __a : str="</n>" , __a : List[str]="</_>" , __a : Optional[int]="left" , **__a : Any , ) -> List[Any]: """simple docstring""" requires_backends(self , ["""jieba"""] ) super().__init__( bod_token=__a , eod_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , unk_token=__a , line_token=__a , space_token=__a , padding_side=__a , **__a , ) __lowercase : Dict = bod_token __lowercase : List[str] = eod_token __lowercase : Tuple = load_vocab(__a ) __lowercase : Union[str, Any] = self.encoder[space_token] __lowercase : Union[str, Any] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] __lowercase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __a : x[1] ) ) __lowercase : List[str] = {v: k for k, v in self.encoder.items()} __lowercase : Dict = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" return self.encoder["\n"] @property def lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" return len(self.encoder ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Optional[Any] , __a : Tuple ) -> Tuple: """simple docstring""" __lowercase : Optional[int] = [] for x in jieba.cut(__a , cut_all=__a ): 
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__a ) ) return output_tokens def lowerCAmelCase ( self : Tuple , __a : Any , **__a : Tuple ) -> int: """simple docstring""" __lowercase : str = [i for i in token_ids if i >= 0] __lowercase : Union[str, Any] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__a , **__a ) def lowerCAmelCase ( self : List[Any] , __a : str ) -> Any: """simple docstring""" return token in self.encoder def lowerCAmelCase ( self : int , __a : List[str] ) -> str: """simple docstring""" return "".join(__a ) def lowerCAmelCase ( self : List[Any] , __a : Dict ) -> Optional[int]: """simple docstring""" return self.encoder.get(__a , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : Union[str, Any] , __a : List[str] ) -> Any: """simple docstring""" return self.decoder.get(__a , self.unk_token ) def lowerCAmelCase ( self : Any , __a : str , __a : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if os.path.isdir(__a ): __lowercase : List[str] = os.path.join( __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) else: __lowercase : int = (filename_prefix + """-""" if filename_prefix else """""") + save_directory __lowercase : str = 0 if " " in self.encoder: __lowercase : Tuple = self.encoder[""" """] del self.encoder[" "] if "\n" in self.encoder: __lowercase : Optional[Any] = self.encoder["""\n"""] del self.encoder["\n"] __lowercase : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __a : x[1] ) ) with open(__a , """w""" , encoding="""utf-8""" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." 
""" Please check that the vocabulary is not corrupted!""" ) __lowercase : Any = token_index writer.write(token + """\n""" ) index += 1 return (vocab_file,) def lowerCAmelCase ( self : int , __a : List[int] , __a : List[int] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is not None: return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) return [1] + ([0] * len(__a ))
352
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __lowercase : List[str] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __lowercase : Optional[Any] = model(__a )["""last_hidden_state"""] __lowercase : Any = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __a ) # compare the actual values for a slice. __lowercase : Dict = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
306
0
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , __a : List[Any] , __a : Union[str, Any]=13 , __a : Optional[int]=7 , __a : int=True , __a : Tuple=True , __a : int=True , __a : str=True , __a : Union[str, Any]=99 , __a : Union[str, Any]=32 , __a : Tuple=5 , __a : Dict=4 , __a : Optional[int]=37 , __a : Tuple="gelu" , __a : Optional[int]=0.1 , __a : Tuple=0.1 , __a : List[str]=512 , __a : Union[str, Any]=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : Dict=4 , ) -> Union[str, Any]: """simple docstring""" __lowercase : str = parent __lowercase : Tuple = batch_size __lowercase : int = seq_length __lowercase : int = is_training __lowercase : Optional[int] = use_attention_mask __lowercase : Dict = use_token_type_ids __lowercase : Any = use_labels __lowercase : List[Any] = vocab_size __lowercase : List[str] = hidden_size __lowercase : Tuple = num_hidden_layers __lowercase : List[str] = num_attention_heads __lowercase : Optional[int] = intermediate_size __lowercase : int = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : List[str] = attention_probs_dropout_prob __lowercase : str = max_position_embeddings __lowercase : List[str] = type_vocab_size __lowercase : Optional[Any] = type_sequence_label_size __lowercase : str = initializer_range __lowercase : Tuple = num_choices def lowerCAmelCase ( self : str ) -> Union[str, Any]: 
"""simple docstring""" __lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : List[Any] = None if self.use_attention_mask: __lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : List[Any] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__a , ) return config, input_ids, attention_mask def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = self.prepare_config_and_inputs() __lowercase : str = config_and_inputs __lowercase : int = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : List[str] = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = FlaxDistilBertModelTester(self ) @slow def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" for model_class_name in self.all_model_classes: __lowercase : Optional[Any] = model_class_name.from_pretrained("""distilbert-base-uncased""" ) __lowercase : Any = model(np.ones((1, 1) ) ) self.assertIsNotNone(__a ) @require_flax class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" 
__lowercase : str = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __lowercase : Union[str, Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __lowercase : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __lowercase : List[Any] = model(__a , attention_mask=__a )[0] __lowercase : Union[str, Any] = (1, 11, 768) self.assertEqual(output.shape , __a ) __lowercase : List[str] = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
353
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): __lowercase : Optional[Any] = len(lowerCAmelCase_ ) __lowercase : str = len(lowerCAmelCase_ ) __lowercase : Optional[int] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __lowercase : Tuple = True for i in range(lowerCAmelCase_ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __lowercase : Optional[Any] = True if a[i].islower(): __lowercase : Dict = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
306
0
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test for FlaxStableDiffusionInpaintPipeline.

    NOTE(review): the return/parameter annotations below (Union, List,
    Optional, Any) are never imported in this file — presumably a lost
    ``from typing import ...`` line; confirm before running.
    """

    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Teardown hook: force a GC pass so large JAX buffers are released
        between tests."""
        super().tearDown()
        gc.collect()

    def lowerCAmelCase ( self : Any ) -> Optional[int]:
        """End-to-end inpainting run: load an image and mask from the hub,
        run the pipeline jitted across all devices, and compare a fixed 3x3
        slice of the output against recorded reference values.

        NOTE(review): every assignment below binds to ``__lowercase`` and
        passes the placeholder ``__a`` as arguments, so the names later read
        (``num_samples``, ``prompt``, ``pipeline``, ``output``, ``images``,
        ``image_slice``, ``output_slice``, ``expected_slice``) are undefined
        as written — the original variable names must be restored before this
        test can execute.
        """
        # Reference input image and mask fetched from the hub.
        __lowercase : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""" )
        __lowercase : List[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
        __lowercase : Union[str, Any] = """xvjiarui/stable-diffusion-2-inpainting"""
        __lowercase : List[str] = FlaxStableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
        __lowercase : Optional[int] = """Face of a yellow cat, high resolution, sitting on a park bench"""
        # Fixed PRNG seed for reproducibility across runs.
        __lowercase : List[str] = jax.random.PRNGKey(0 )
        __lowercase : List[Any] = 50  # number of inference steps
        # One sample per device; inputs are duplicated accordingly.
        __lowercase : str = jax.device_count()
        __lowercase : List[str] = num_samples * [prompt]
        __lowercase : int = num_samples * [init_image]
        __lowercase : Union[str, Any] = num_samples * [mask_image]
        __lowercase : Union[str, Any] = pipeline.prepare_inputs(__a , __a , __a )

        # shard inputs and rng
        __lowercase : Tuple = replicate(__a )
        __lowercase : List[str] = jax.random.split(__a , jax.device_count() )
        __lowercase : List[str] = shard(__a )
        __lowercase : int = shard(__a )
        __lowercase : List[Any] = shard(__a )

        __lowercase : Optional[int] = pipeline(
            __a , __a , __a , __a , __a , __a , jit=__a )

        # Gather per-device outputs back to host layout (H=W=512, RGB).
        __lowercase : List[str] = output.images.reshape(__a , 512 , 512 , 3 )
        # Inspect a fixed 3x3 patch of the last channel.
        __lowercase : List[Any] = images[0, 253:256, 253:256, -1]
        __lowercase : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        # Reference values recorded from a known-good run of this checkpoint.
        __lowercase : Optional[Any] = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
        print(F"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
354
from scipy.stats import spearmanr

import datasets


# Restored constant names: the decorator and MetricInfo below reference
# _DESCRIPTION / _KWARGS_DESCRIPTION / _CITATION, which the obfuscated file
# had all rebound to a single name, leaving these references undefined.
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets.
Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative
correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman
correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable
but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
            Haberland, Matt and Reddy, Tyler and Cournapeau, David and
            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
            Kern, Robert and Larson, Eric and Carey, C J and
            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
            Harris, Charles R. and Archibald, Anne M. and
            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
            Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase(datasets.Metric):
    """Spearman rank-order correlation metric backed by scipy.stats.spearmanr.

    Fixes vs. the original: ``datasets.Metric`` dispatches to ``_info`` and
    ``_compute`` — the obfuscated methods were renamed and never called; the
    compute method also had two parameters with the same name (SyntaxError)
    and returned an undefined ``results``.
    """

    def _info(self):
        """Declare input features and reference URLs for this metric."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Spearman correlation of predictions vs. references.

        Returns a dict with key ``spearmanr`` and, when ``return_pvalue`` is
        True, additionally ``spearmanr_pvalue``.
        """
        # scipy returns (correlation, pvalue); correlation is symmetric in
        # its two arguments.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        return {"spearmanr": results[0]}
306
0
import sys
from collections import defaultdict


class Heap:
    """Array-based min-heap over vertex distances for Prim's algorithm.

    ``node_position[v]`` tracks where vertex ``v`` currently sits inside the
    heap array so its distance can be decreased in O(log n).

    Fixes vs. the original: the class was renamed while every call site still
    said ``Heap`` (NameError), and the swap/decrease-key assignment targets
    had been lost, so the heap never reordered anything.
    """

    def __init__(self):
        # node_position[v] == index of vertex v inside the heap array
        self.node_position = []

    def get_position(self, vertex):
        """Return vertex's current index in the heap array."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that vertex now sits at heap index pos."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:  # leaf — nothing below to compare with
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1  # only a left child exists
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap distances and the vertices they belong to.
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa
            # Keep the vertex -> heap-index map consistent with the swap.
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift ``val`` (placed at ``index``) up to restore the heap property."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Pull the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val belongs at the top.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over the whole array."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest distance."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel sinks to the bottom on sift-down
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


# Backward-compatible alias for the obfuscated class name.
lowerCAmelCase = Heap


def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree of a connected graph.

    adjacency_list maps vertex -> list of [neighbor, weight] pairs; vertices
    are the integers 0 .. len(adjacency_list)-1, and the tree is grown from
    vertex 0. Returns a list of (parent, vertex) edges.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


# Backward-compatible alias for the obfuscated function name.
snake_case_ = prisms_algorithm


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
355
from __future__ import annotations


def get_failure_array(pattern: str) -> list[int]:
    """Build the KMP failure (prefix) function for ``pattern``.

    failure[k] is the length of the longest proper prefix of pattern[:k+1]
    that is also a suffix of it.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Fall back to the next shorter border and retry this position.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


def kmp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs as a substring of ``text``.

    Knuth–Morris–Pratt: on a mismatch the pattern index falls back via the
    failure array instead of restarting, giving O(len(text)) matching.

    Fixes vs. the original: both functions had been renamed to a single
    shadowing name while their call sites still used ``kmp`` /
    ``get_failure_array`` (NameError), and the index assignments were lost.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


# Backward-compatible alias: the previous module-level name resolved to the
# last (shadowing) definition.
snake_case_ = get_failure_array


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    texta = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    textb = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, textb)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
306
0
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , __a : Optional[Any] , __a : Tuple=13 , __a : str=64 , __a : List[Any]=2 , __a : int=3 , __a : Union[str, Any]=True , __a : Tuple=True , __a : Tuple=32 , __a : Dict=5 , __a : str=4 , __a : List[str]=37 , __a : int="gelu" , __a : Union[str, Any]=0.1 , __a : List[Any]=0.1 , __a : Optional[int]=10 , __a : int=0.02 , __a : List[str]=[1, 16, 4, 4] , __a : Tuple=None , ) -> int: """simple docstring""" __lowercase : Union[str, Any] = parent __lowercase : str = batch_size __lowercase : Tuple = image_size __lowercase : Any = patch_size __lowercase : int = num_channels __lowercase : List[str] = is_training __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = hidden_size __lowercase : Tuple = num_hidden_layers __lowercase : List[str] = num_attention_heads __lowercase : Optional[Any] = intermediate_size __lowercase : Optional[int] = hidden_act __lowercase : str = hidden_dropout_prob __lowercase : Tuple = attention_probs_dropout_prob __lowercase : Optional[Any] = type_sequence_label_size __lowercase : Dict = initializer_range __lowercase : Optional[Any] = scope __lowercase : int = backbone_featmap_shape # in ViT 
hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size __lowercase : List[Any] = (self.image_size // 32) ** 2 __lowercase : Tuple = num_patches + 1 def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : int = None if self.use_labels: __lowercase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase : str = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [4, 8, 16, 32], """num_groups""": 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , ) def lowerCAmelCase ( self : List[Any] , __a : Any , __a : List[Any] , __a : Tuple ) -> str: """simple docstring""" __lowercase : str = ViTHybridModel(config=__a ) model.to(__a ) model.eval() __lowercase : Any = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : List[str] , __a : int , __a : Any , __a : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = self.type_sequence_label_size __lowercase : List[str] = ViTHybridForImageClassification(__a ) model.to(__a ) model.eval() __lowercase : Tuple = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" __lowercase : Optional[int] = self.prepare_config_and_inputs() __lowercase : List[Any] = config_and_inputs __lowercase : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Dict = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () _A : Optional[int] = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) _A : Any = False _A : List[str] = False _A : Any = False def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Dict = ViTHybridModelTester(self ) __lowercase : Optional[int] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCAmelCase ( self : int ) -> int: """simple docstring""" pass def lowerCAmelCase ( self : str ) -> int: """simple docstring""" __lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Union[str, Any] = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowercase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is 
None or isinstance(__a , nn.Linear ) ) def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[int] = model_class(__a ) __lowercase : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : List[Any] = _config_zero_init(__a ) for model_class in self.all_model_classes: __lowercase : Union[str, Any] = model_class(config=__a ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": __lowercase : Optional[Any] = [F"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : Optional[Any] = 
ViTHybridModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case_ ( ): __lowercase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : Dict = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __a ) __lowercase : Union[str, Any] = self.default_image_processor __lowercase : List[str] = prepare_img() __lowercase : str = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): __lowercase : Any = model(**__a ) # verify the logits __lowercase : str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __lowercase : Optional[int] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) ) @slow @require_accelerate def lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" __lowercase : List[str] = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" ) __lowercase : List[str] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" ) __lowercase : int = prepare_img() __lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ) __lowercase : Any = model(**__a ) __lowercase : Dict = outputs.logits # model predicts one of the 1000 ImageNet classes __lowercase : str = logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
356
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''pixel_values'''] def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None: """simple docstring""" super().__init__(**__a ) __lowercase : Dict = size if size is not None else {"""shortest_edge""": 224} __lowercase : Union[str, Any] = get_size_dict(__a , default_to_square=__a ) __lowercase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __lowercase : Any = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" ) __lowercase : Optional[int] = do_resize __lowercase : Union[str, Any] = size __lowercase : List[Any] = resample __lowercase : Any = do_center_crop __lowercase : Dict = crop_size __lowercase : int = do_rescale __lowercase : Tuple = rescale_factor __lowercase : List[Any] = do_normalize __lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowercase : int = image_std if image_std is not None else OPENAI_CLIP_STD __lowercase : Union[str, 
Any] = do_convert_rgb def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray: """simple docstring""" __lowercase : Dict = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) __lowercase : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray: """simple docstring""" __lowercase : Tuple = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys (height, width). 
Got {size.keys()}" ) return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a ) def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]: """simple docstring""" return rescale(__a , scale=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray: """simple docstring""" return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image: """simple docstring""" __lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Dict = size if size is not None else self.size __lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a ) __lowercase : int = resample if resample is not None else self.resample __lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor 
__lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : str = image_std if image_std is not None else self.image_std __lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase : Union[str, Any] = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images] # All transformations expect numpy arrays. __lowercase : Any = [to_numpy_array(__a ) for image in images] if do_resize: __lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images] if do_center_crop: __lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: __lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: __lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] __lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images] __lowercase : Optional[int] = {"""pixel_values""": images} return BatchFeature(data=__a , tensor_type=__a )
306
0
import string


def decrypt(message: str) -> list:
    """Brute-force a Caesar cipher: print every one of the 26 candidate
    decryptions of ``message`` and return them as a list (one per key).

    Only uppercase ASCII letters are shifted; any other character is copied
    through unchanged.

    Fixes vs. the original: ``find`` was called with the wrong argument
    (should be the current symbol) and the per-iteration assignment targets
    were lost; returning the candidates (the original returned None, which no
    caller used) makes the routine testable.
    """
    candidates = []
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol) - key
                if num < 0:
                    num += len(string.ascii_uppercase)  # wrap around the alphabet
                translated += string.ascii_uppercase[num]
            else:
                translated += symbol
        print(f"Decryption using Key #{key}: {translated}")
        candidates.append(translated)
    return candidates


def main() -> None:
    """Prompt for a ciphertext and print all candidate decryptions."""
    message = input("Encrypted message: ").upper()
    decrypt(message)


# Backward-compatible alias: the obfuscated module-level name resolved to the
# last (shadowing) definition.
snake_case_ = main


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
357
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict: dict):
    """Sum all parameter values in `state_dict`.

    `encoder.embeddings` entries are skipped because they are double copied in
    the original FLAVA checkpoint (per the original comment).
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict: dict) -> dict:
    """Rename DALL-E encoder keys into the layout expected by FlavaImageCodebook."""
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        # Only the LAST `.w` / `.b` is a weight/bias suffix; inner ones are names.
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Load a DALL-E encoder checkpoint, convert its weights, and save (or return) a FLAVA image codebook.

    `checkpoint_path` may be a local file or a URL (loaded via torch.hub).
    When `save_checkpoint` is False the upgraded state dict is returned instead
    of being written to `pytorch_dump_folder_path`.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # Checkpoints may contain either a full Encoder module or a raw state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # The renames must not change any values: parameter sums must agree.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
306
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    """Configuration class for a SwitchTransformers model.

    Stores the architecture hyper-parameters (model sizes, number of sparse
    MoE layers, router settings) used to instantiate the model. The parameter
    names were reconstructed from the attribute assignments in the body; the
    original dump had every parameter named `__a`, which is a SyntaxError.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        # `feed_forward_proj` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
358
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy weights from an old-structure ProphetNet checkpoint into the current layout and save it.

    XLM checkpoints are detected by the "xprophetnet" substring in the path.
    Every key the new model reports as missing is resolved attribute-by-attribute
    against the old model via the `mapping` rename table.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections stored fused (in_proj) in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means "stay on the same module").
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the mapped name is absent.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # Old model fuses q/k/v into one in_proj tensor; split it in thirds.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # These two checks were no-op bare comparisons; restore the asserts.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level on both models for the next path component.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
306
0
"""Near-duplicate detection and removal for code datasets via MinHash + Jaccard filtering."""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """MinHash of a token list, or None when the list is too short to be meaningful."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split code on non-alphanumeric characters into a set of non-empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    """Incremental MinHash-LSH index that groups near-duplicate documents into clusters."""

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # cluster base key -> set of near-duplicate keys
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a document; attach it to the cluster of the first close duplicate found."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # None of the close duplicates is a cluster base yet: start a new cluster.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return all clusters (base element included) as lists of dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the clusters to `filepath` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    """Worker: MinHash one (index, row) pair; returns ((index, repo, path), MinHash) or None."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, MinHash) pairs for a dataset, computed in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Index the whole dataset and return its duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Jaccard similarity between the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


# Published by find_extremes() so forked pool workers can read the dataset
# without pickling it per task.
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical cluster members."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over all clusters in a process pool."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Remove near-duplicates from `dataset`, keeping one extreme per duplicate group.

    Returns the filtered dataset and the (annotated) duplicate clusters.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
359
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard UK coin denominations.

    Classic bottom-up coin-change count: `number_of_ways[i]` accumulates the
    number of combinations summing to i. Denominations are processed one at a
    time so each combination is counted once regardless of coin order.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence (use no coins)

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
306
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , __a : Dict , __a : int=7 , __a : Dict=3 , __a : str=30 , __a : Optional[Any]=400 , __a : Tuple=True , __a : Optional[Any]=None , __a : str=True , __a : Any=1 / 255 , __a : Optional[Any]=True , __a : List[Any]=[0.5, 0.5, 0.5] , __a : Optional[Any]=[0.5, 0.5, 0.5] , __a : List[str]=True , ) -> int: """simple docstring""" __lowercase : int = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} __lowercase : Optional[Any] = parent __lowercase : Optional[int] = batch_size __lowercase : str = num_channels __lowercase : Dict = min_resolution __lowercase : Dict = max_resolution __lowercase : str = do_resize __lowercase : Optional[Any] = size __lowercase : Union[str, Any] = do_rescale __lowercase : Any = rescale_factor __lowercase : Tuple = do_normalize __lowercase : List[Any] = image_mean __lowercase : int = image_std __lowercase : Optional[int] = do_pad def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def lowerCAmelCase ( self : int , __a : List[Any] , __a : str=False ) -> List[str]: """simple docstring""" if not batched: __lowercase : List[str] = image_inputs[0] if isinstance(__a , Image.Image ): __lowercase : Dict = image.size else: __lowercase 
: Any = image.shape[1], image.shape[2] if w < h: __lowercase : int = int(self.size["""shortest_edge"""] * h / w ) __lowercase : Any = self.size["""shortest_edge"""] elif w > h: __lowercase : str = self.size["""shortest_edge"""] __lowercase : List[Any] = int(self.size["""shortest_edge"""] * w / h ) else: __lowercase : Tuple = self.size["""shortest_edge"""] __lowercase : Any = self.size["""shortest_edge"""] else: __lowercase : List[Any] = [] for image in image_inputs: __lowercase : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowercase : Tuple = max(__a , key=lambda __a : item[0] )[0] __lowercase : Union[str, Any] = max(__a , key=lambda __a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : Optional[int] = DetrImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase : Tuple = DetrImageProcessingTester(self ) @property def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , """image_mean""" ) ) self.assertTrue(hasattr(__a , """image_std""" ) ) self.assertTrue(hasattr(__a , """do_normalize""" ) ) self.assertTrue(hasattr(__a , """do_rescale""" ) ) self.assertTrue(hasattr(__a , """rescale_factor""" ) ) self.assertTrue(hasattr(__a , """do_resize""" ) ) self.assertTrue(hasattr(__a , """size""" ) ) self.assertTrue(hasattr(__a , """do_pad""" ) ) def lowerCAmelCase ( self : int ) -> Optional[int]: """simple docstring""" __lowercase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) 
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , __a ) __lowercase : Union[str, Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__a ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __a ) def lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" pass def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input __lowercase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __lowercase : List[str] = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowercase : Dict = self.image_processor_tester.get_expected_values(__a , batched=__a ) __lowercase : Tuple = image_processing(__a , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input __lowercase : Union[str, Any] = 
image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __lowercase : Union[str, Any] = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowercase : Dict = image_processing(__a , return_tensors="""pt""" ).pixel_values __lowercase : List[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input __lowercase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(__a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowercase : int = image_processing(__a , return_tensors="""pt""" ).pixel_values __lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __lowercase : 
List[str] = json.loads(f.read() ) __lowercase : Dict = {"""image_id""": 39769, """annotations""": target} # encode them __lowercase : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) __lowercase : Optional[int] = image_processing(images=__a , annotations=__a , return_tensors="""pt""" ) # verify pixel values __lowercase : Optional[Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __a ) __lowercase : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __a , atol=1E-4 ) ) # verify area __lowercase : List[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __a ) ) # verify boxes __lowercase : str = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __a ) __lowercase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __a , atol=1E-3 ) ) # verify image_id __lowercase : Tuple = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __a ) ) # verify is_crowd __lowercase : str = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __a ) ) # verify class_labels __lowercase : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __a ) ) # verify orig_size __lowercase : List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __a ) ) # verify size __lowercase : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __a ) ) @slow def lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" __lowercase : 
Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __lowercase : Tuple = json.loads(f.read() ) __lowercase : Any = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} __lowercase : Optional[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __lowercase : Optional[int] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) __lowercase : List[Any] = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors="""pt""" ) # verify pixel values __lowercase : Any = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __a ) __lowercase : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __a , atol=1E-4 ) ) # verify area __lowercase : int = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __a ) ) # verify boxes __lowercase : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __a ) __lowercase : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __a , atol=1E-3 ) ) # verify image_id __lowercase : List[Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __a ) ) # verify is_crowd __lowercase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __a ) ) # verify class_labels __lowercase : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __a ) ) # verify masks __lowercase 
: Dict = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __a ) # verify orig_size __lowercase : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __a ) ) # verify size __lowercase : Dict = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __a ) )
360
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any: """simple docstring""" __lowercase : Optional[int] = parent __lowercase : List[str] = out_indices if out_indices is not None else [4] __lowercase : Optional[int] = stage_names __lowercase : Any = out_features __lowercase : Optional[Any] = backbone __lowercase : Optional[Any] = batch_size __lowercase : Union[str, Any] = image_size __lowercase : List[str] = num_channels __lowercase : str = use_pretrained_backbone __lowercase : str = is_training def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : str = self.get_config() return config, pixel_values def lowerCAmelCase ( self : int ) -> str: """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def lowerCAmelCase ( self : Optional[int] 
, __a : Dict , __a : Any ) -> Dict: """simple docstring""" __lowercase : Dict = TimmBackbone(config=__a ) model.to(__a ) model.eval() with torch.no_grad(): __lowercase : Optional[Any] = model(__a ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" __lowercase : Union[str, Any] = self.prepare_config_and_inputs() __lowercase , __lowercase : str = config_and_inputs __lowercase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ): '''simple docstring''' _A : List[Any] = (TimmBackbone,) if is_torch_available() else () _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {} _A : List[Any] = False _A : List[str] = False _A : Any = False _A : Optional[Any] = False def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : str = TimmBackboneModelTester(self ) __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a ) def lowerCAmelCase ( self : Any ) -> str: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" __lowercase : Tuple = """resnet18""" __lowercase : Optional[int] = """microsoft/resnet-18""" __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a ) __lowercase : Dict = AutoBackbone.from_pretrained(__a ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) 
, len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] ) __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" ) def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" ) def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""TimmBackbone initialization is managed on the timm side""" ) def lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" ) def lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : Union[str, 
Any] ) -> int: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" pass @unittest.skip("""model weights aren't tied in TimmBackbone.""" ) def lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" ) def lowerCAmelCase ( self : List[Any] ) -> Tuple: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" ) def lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" pass @unittest.skip("""TimmBackbone doesn't support output_attentions.""" ) def lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Safetensors is not supported by timm.""" ) def lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" pass def lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[Any] = model_class(__a ) __lowercase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Optional[Any] = 
True __lowercase : Union[str, Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __lowercase : Union[str, Any] = self.all_model_classes[0] __lowercase : List[Any] = model_class(__a ) model.to(__a ) __lowercase : Optional[Any] = self._prepare_for_class(__a , __a ) __lowercase : Union[str, Any] = model(**__a ) __lowercase : Optional[int] = outputs[0][-1] # Encoder-/Decoder-only models __lowercase : Any = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __lowercase : Optional[int] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__a ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : List[str] = model_class(__a ) model.to(__a ) model.eval() __lowercase : int = model(**__a ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __lowercase : Any = copy.deepcopy(__a ) __lowercase : Dict = None __lowercase : Tuple = model_class(__a ) model.to(__a ) model.eval() __lowercase : Optional[int] = model(**__a ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __lowercase : List[str] = copy.deepcopy(__a ) __lowercase : Optional[Any] = False __lowercase : str = model_class(__a ) model.to(__a ) model.eval() __lowercase : List[Any] = model(**__a )
306
0
import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' _A : str = MvpTokenizer _A : str = MvpTokenizerFast _A : Union[str, Any] = True _A : Optional[int] = filter_roberta_detectors def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __lowercase : Optional[Any] = dict(zip(__a , range(len(__a ) ) ) ) __lowercase : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __lowercase : List[str] = {"""unk_token""": """<unk>"""} __lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__a ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__a ) ) def lowerCAmelCase ( self : Dict , **__a : Tuple ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a ) def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> List[str]: """simple docstring""" kwargs.update(self.special_tokens_map ) return 
self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a ) def lowerCAmelCase ( self : Tuple , __a : str ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" ) @cached_property def lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" ) @require_torch def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __lowercase : str = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : List[Any] = tokenizer(__a , max_length=len(__a ) , padding=__a , return_tensors="""pt""" ) self.assertIsInstance(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __lowercase : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(__a , __a ) # Test that special tokens are reset @require_torch def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : str = tokenizer(__a , padding=__a , return_tensors="""pt""" ) # check if input_ids are returned and no labels self.assertIn("""input_ids""" , __a ) self.assertIn("""attention_mask""" , __a ) self.assertNotIn("""labels""" , __a ) self.assertNotIn("""decoder_attention_mask""" , __a ) @require_torch def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, 
self.default_tokenizer_fast]: __lowercase : Tuple = tokenizer(text_target=__a , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCAmelCase ( self : Any ) -> int: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : Tuple = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__a , truncation=__a , return_tensors="""pt""" ) self.assertIsInstance(__a , __a ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" __lowercase : int = ["""A long paragraph for summarization."""] __lowercase : List[str] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __lowercase : List[str] = tokenizer(__a , text_target=__a , return_tensors="""pt""" ) __lowercase : int = inputs["""input_ids"""] __lowercase : List[str] = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): __lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(__a , **__a ) __lowercase : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a ) __lowercase : str = """A, <mask> AllenNLP sentence.""" __lowercase : List[Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a ) __lowercase : Any = 
tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __lowercase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __lowercase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( __a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
361
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : str = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': 
'''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowerCamelCase : Optional[Any] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int ): for attribute in key.split(""".""" ): __lowercase : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if weight_type is not None: __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape else: __lowercase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowercase : Dict = value elif weight_type == "weight_g": __lowercase : Union[str, Any] = value elif weight_type == "weight_v": __lowercase : List[Any] = value elif weight_type == "bias": __lowercase : int = value elif weight_type == "running_mean": __lowercase : List[Any] = value elif weight_type == "running_var": __lowercase : int = value elif weight_type == "num_batches_tracked": __lowercase : int = value elif weight_type == "inv_freq": __lowercase : Optional[Any] = value else: __lowercase : Any = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ): __lowercase : str = [] __lowercase : Any = fairseq_model.state_dict() __lowercase : List[str] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Optional[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : Any = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __lowercase : Tuple = True if "*" in mapped_key: __lowercase : List[Any] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2] __lowercase : Any = mapped_key.replace("""*""" , lowerCAmelCase_ ) if "pos_bias_u" in name: __lowercase : Any = None elif "pos_bias_v" in name: __lowercase : Tuple = None elif "weight_g" in name: __lowercase : Union[str, Any] = """weight_g""" elif "weight_v" in name: __lowercase : Dict = """weight_v""" elif "bias" in name: __lowercase : Union[str, Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowercase : str = """weight""" elif "running_mean" in name: __lowercase : str = """running_mean""" elif "inv_freq" in name: __lowercase : List[Any] = """inv_freq""" elif "running_var" in name: __lowercase : Any = """running_var""" elif "num_batches_tracked" in name: __lowercase : Any = """num_batches_tracked""" else: __lowercase : Optional[int] = None set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(F"Unused weights: {unused_weights}" ) def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , 
lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ): __lowercase : List[Any] = full_name.split("""conv_layers.""" )[-1] __lowercase : int = name.split(""".""" ) __lowercase : Optional[Any] = int(items[0] ) __lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowercase : List[str] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __lowercase : Dict = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ): if config_path is not None: __lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" ) else: __lowercase : List[Any] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __lowercase : Tuple = """rotary""" if is_finetuned: if dict_path: __lowercase : Any = Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : List[Any] = target_dict.pad_index __lowercase : Optional[int] = target_dict.bos_index __lowercase : List[Any] = target_dict.eos_index __lowercase : List[str] = len(target_dict.symbols ) __lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" ) if not os.path.isdir(lowerCAmelCase_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) __lowercase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched __lowercase : int = 0 __lowercase : Any = 1 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Dict = WavaVecaCTCTokenizer( lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , ) __lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False __lowercase : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) __lowercase : Optional[int] = 
WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) __lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ ) else: __lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ ) if is_finetuned: __lowercase , __lowercase , __lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" ) __lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ ) __lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ ) __lowercase : Dict = model[0].eval() recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCamelCase : Any = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
306
0
from __future__ import annotations import math def snake_case_ ( lowerCAmelCase_ : int ): if num <= 0: __lowercase : List[Any] = F"{num}: Invalid input, please enter a positive integer." raise ValueError(lowerCAmelCase_ ) __lowercase : int = [True] * (num + 1) __lowercase : Dict = [] __lowercase : str = 2 __lowercase : List[str] = int(math.sqrt(lowerCAmelCase_ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(lowerCAmelCase_ ) # Set multiples of start be False for i in range(start * start , num + 1 , lowerCAmelCase_ ): if sieve[i] is True: __lowercase : Tuple = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(lowerCAmelCase_ ) return prime if __name__ == "__main__": print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
362
def hamming_distance(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters of the two strings differ.

    Args:
        string1: First string.
        string2: Second string; must have the same length as ``string1``.

    Returns:
        Count of positions where the characters disagree.

    Raises:
        ValueError: If the two strings differ in length.

    >>> hamming_distance("karolin", "kathrin")
    3
    >>> hamming_distance("python", "python")
    0
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    # Pair characters positionally and count mismatches. (The mangled
    # original reused one name for both strings and both loop variables,
    # so it could never detect a difference.)
    return sum(chara != charb for chara, charb in zip(string1, string2))


# Backward-compatible alias for the previous (mangled) public name.
snake_case_ = hamming_distance

if __name__ == "__main__":
    import doctest

    doctest.testmod()
306
0
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Return True iff ``pattern`` occurs in ``text`` (Knuth–Morris–Pratt).

    Runs in O(len(pattern) + len(text)): on a mismatch the pattern index
    jumps back via the failure array instead of restarting the scan.

    Args:
        pattern: Substring to search for.
        text: String to search in.

    Returns:
        True if ``pattern`` is a substring of ``text`` (the empty pattern
        matches trivially), else False.
    """
    if not pattern:
        # Empty pattern matches any text (guards the pattern[0] access below).
        return True

    # 1) Build the failure (longest proper prefix == suffix) table.
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern.
    i = 0  # index into text
    j = 0  # index into pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                return True
            j += 1
        elif j > 0:
            # Mismatch after a partial match: fall back far enough to keep
            # the longest still-viable prefix, without advancing in text.
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure array for ``pattern``.

    ``failure[k]`` is the length of the longest proper prefix of
    ``pattern[: k + 1]`` that is also a suffix of it.
    """
    failure = [0]
    i = 0  # length of the current matched prefix
    j = 1  # position being characterized
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Shrink to the next-longest candidate prefix and retry.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    texta = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    textb = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, texta) and not kmp(pattern, textb)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
363
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case_ ( lowerCAmelCase_ : Tuple ): if isinstance(lowerCAmelCase_ , collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCAmelCase : '''simple docstring''' def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]: """simple docstring""" __lowercase : List[str] = np.abs((a - b) ).max() self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." 
) def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str: """simple docstring""" __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = output[0] with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(__a ) __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = after_output[0] __lowercase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-3 ) def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a ) __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Union[str, Any] = model( input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a ) __lowercase : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(__a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase : Optional[int] = to_atuple(vision_model.config.image_size ) __lowercase : List[str] = to_atuple(vision_model.config.patch_size ) __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase : Dict = output.text_model_output.attentions self.assertEqual(len(__a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]: """simple docstring""" pt_model.to(__a ) pt_model.eval() # prepare inputs __lowercase : Union[str, Any] = inputs_dict __lowercase : List[Any] = {k: 
torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple() __lowercase : Tuple = fx_model(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__a ) __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a ) __lowercase : Dict = fx_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__a ) __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a ) pt_model_loaded.to(__a ) pt_model_loaded.eval() with torch.no_grad(): __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = VisionTextDualEncoderModel(__a ) __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a ) __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a ) __lowercase : Any = fx_state self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : Any , 
__a : Any , __a : Dict , __a : Tuple ) -> str: """simple docstring""" __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a ) __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params ) self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__a ) def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = self.prepare_config_and_inputs() self.check_save_load(**__a ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" __lowercase : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__a ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" ) __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" ) __lowercase : Dict = config_inputs_dict self.check_equivalence_pt_to_flax(__a , __a , __a ) self.check_equivalence_flax_to_pt(__a , __a , __a ) @slow def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs() __lowercase : Dict = model_a(**__a ) __lowercase : Any = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__a ) __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Optional[int] = 
model_a(**__a ) __lowercase : Tuple = after_outputs[0] __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-5 ) @require_flax class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : int = 13 __lowercase : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : Tuple = random_attention_mask([batch_size, 4] ) __lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict: """simple docstring""" __lowercase : int = FlaxViTModel(__a ) __lowercase : List[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = FlaxViTModelTester(self ) __lowercase : str = FlaxBertModelTester(self ) __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs() __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Optional[int] = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCAmelCase ( __a 
, unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : Tuple = 13 __lowercase : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : List[Any] = random_attention_mask([batch_size, 4] ) __lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : Dict = FlaxCLIPVisionModel(__a ) __lowercase : Optional[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : List[Any] = FlaxCLIPVisionModelTester(self ) __lowercase : Optional[Any] = FlaxBertModelTester(self ) __lowercase : Any = clip_model_tester.prepare_config_and_inputs() __lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Dict = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" 
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) __lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase : Tuple = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" ) __lowercase : Optional[int] = model(**__a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
306
0
def snake_case_(a, b):
    """Multiply two non-negative integers by double-and-add (Russian
    peasant multiplication): O(log b) additions instead of ``b`` of them.

    NOTE: this definition is immediately shadowed by the 3-argument modular
    variant below, exactly as in the original module.
    """
    # Bug fix: the original declared the same parameter name twice in both
    # defs, which is a SyntaxError; parameters are now distinct and match
    # the names the bodies actually use.
    res = 0
    while b > 0:
        if b & 1:  # low bit set -> this power-of-two multiple of ``a`` contributes
            res += a
        a += a  # double the addend
        b >>= 1  # consume the processed bit
    return res


def snake_case_(a, b, c):  # noqa: F811 -- intentionally shadows the 2-arg version
    """Return ``(a * b) % c`` via double-and-add, reducing modulo ``c`` at
    every step so intermediate sums stay small.
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
364
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from 
.scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
306
0
from manim import * class lowerCAmelCase ( __a ): '''simple docstring''' def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase : str = Rectangle(height=0.5 , width=0.5 ) __lowercase : Dict = Rectangle(height=0.25 , width=0.25 ) __lowercase : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __lowercase : Optional[Any] = [mem.copy() for i in range(6 )] __lowercase : Optional[Any] = [mem.copy() for i in range(6 )] __lowercase : List[Any] = VGroup(*__a ).arrange(__a , buff=0 ) __lowercase : Optional[int] = VGroup(*__a ).arrange(__a , buff=0 ) __lowercase : int = VGroup(__a , __a ).arrange(__a , buff=0 ) __lowercase : List[Any] = Text("""CPU""" , font_size=24 ) __lowercase : List[str] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__a ) __lowercase : List[Any] = [mem.copy() for i in range(4 )] __lowercase : List[Any] = VGroup(*__a ).arrange(__a , buff=0 ) __lowercase : Any = Text("""GPU""" , font_size=24 ) __lowercase : int = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) gpu.move_to([-1, -1, 0] ) self.add(__a ) __lowercase : Optional[Any] = [mem.copy() for i in range(6 )] __lowercase : Union[str, Any] = VGroup(*__a ).arrange(__a , buff=0 ) __lowercase : Any = Text("""Model""" , font_size=24 ) __lowercase : Any = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) model.move_to([3, -1.0, 0] ) self.add(__a ) __lowercase : Dict = [] __lowercase : str = [] __lowercase : Tuple = [] for i, rect in enumerate(__a ): rect.set_stroke(__a ) __lowercase : Optional[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__a , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , 
direction=__a , buff=0.0 ) self.add(__a ) model_cpu_arr.append(__a ) self.add(*__a , *__a , *__a ) __lowercase : Dict = [mem.copy() for i in range(6 )] __lowercase : List[Any] = VGroup(*__a ).arrange(__a , buff=0 ) __lowercase : List[str] = Text("""Loaded Checkpoint""" , font_size=24 ) __lowercase : Union[str, Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) checkpoint.move_to([3, 0.5, 0] ) self.add(__a ) __lowercase : List[str] = [] __lowercase : Tuple = [] for i, rect in enumerate(__a ): __lowercase : List[Any] = fill.copy().set_fill(__a , opacity=0.7 ) target.move_to(__a ) ckpt_arr.append(__a ) __lowercase : Union[str, Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__a ) self.add(*__a , *__a ) __lowercase : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __lowercase : Dict = MarkupText( F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__a , __a ) __lowercase : Union[str, Any] = MarkupText( F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__a ) __lowercase : Optional[Any] = MarkupText( F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) __lowercase : Union[str, Any] = [meta_mem.copy() for i in range(6 )] __lowercase : Optional[int] = [meta_mem.copy() for i in range(6 )] __lowercase : Any = VGroup(*__a ).arrange(__a , buff=0 ) __lowercase : List[Any] = VGroup(*__a ).arrange(__a , buff=0 ) __lowercase : Dict = VGroup(__a , __a ).arrange(__a , buff=0 ) __lowercase : Optional[Any] = Text("""Disk""" , font_size=24 ) __lowercase : List[str] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__a , run_time=3 ) , Write(__a , run_time=1 ) , Create(__a , run_time=1 ) ) __lowercase : Optional[Any] = [] for i, rect in enumerate(__a ): __lowercase : Union[str, Any] = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__a , run_time=1.5 ) ) self.play(*__a ) self.play(FadeOut(__a ) ) __lowercase : Dict = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__a , run_time=3 ) ) self.play( FadeOut(__a , __a , *__a , *__a ) , ) self.wait()
365
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm lowerCamelCase : str = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex lowerCamelCase : Union[str, Any] = 10 lowerCamelCase : List[str] = 2_56 def snake_case_ ( lowerCAmelCase_ : List[str] ): if len(lowerCAmelCase_ ) < MIN_NUM_TOKENS: return None __lowercase : Dict = MinHash(num_perm=lowerCAmelCase_ ) for token in set(lowerCAmelCase_ ): min_hash.update(token.encode() ) return min_hash def snake_case_ ( lowerCAmelCase_ : str ): return {t for t in NON_ALPHA.split(lowerCAmelCase_ ) if len(t.strip() ) > 0} class lowerCAmelCase : '''simple docstring''' def __init__( self : List[str] , *, __a : float = 0.85 , ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[Any] = duplication_jaccard_threshold __lowercase : Optional[Any] = NUM_PERM __lowercase : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) __lowercase : List[str] = defaultdict(__a ) def lowerCAmelCase ( self : str , __a : Tuple , __a : MinHash ) -> None: """simple docstring""" __lowercase : List[Any] = self._index.query(__a ) if code_key in self._index.keys: print(F"Duplicate key {code_key}" ) return self._index.insert(__a , __a ) if len(__a ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(__a ) break else: self._duplicate_clusters[close_duplicates[0]].add(__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[List[Dict]]: """simple docstring""" __lowercase : Dict = [] for base, duplicates in self._duplicate_clusters.items(): __lowercase : List[str] = [base] + list(__a ) # reformat the cluster to be a list of dict 
__lowercase : Optional[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(__a ) return duplicate_clusters def lowerCAmelCase ( self : Any , __a : int ) -> None: """simple docstring""" __lowercase : Tuple = self.get_duplicate_clusters() with open(__a , """w""" ) as f: json.dump(__a , __a ) def snake_case_ ( lowerCAmelCase_ : str ): __lowercase , __lowercase : Union[str, Any] = element __lowercase : Optional[Any] = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def snake_case_ ( lowerCAmelCase_ : Type[Dataset] ): with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowerCAmelCase_ , max_queue_size=10000 ) , chunksize=100 , ): if data is not None: yield data def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float ): __lowercase : Dict = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase_ ) ) , max_queue_size=100 ) ): di.add(lowerCAmelCase_ , lowerCAmelCase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): __lowercase : List[str] = get_tokens(lowerCAmelCase_ ) __lowercase : Dict = get_tokens(lowerCAmelCase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) lowerCamelCase : List[str] = None def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ): __lowercase : Union[str, Any] = [] for elementa in cluster: __lowercase : Tuple = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: __lowercase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(lowerCAmelCase_ , lowerCAmelCase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: __lowercase : Dict = 1 extremes.append(lowerCAmelCase_ ) return extremes def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple ): global _shared_dataset __lowercase : Tuple = dataset __lowercase : Optional[int] = [] __lowercase : str = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowerCAmelCase_ , lowerCAmelCase_ , ) , total=len(lowerCAmelCase_ ) , ): extremes_list.append(lowerCAmelCase_ ) return extremes_list def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float = 0.85 ): __lowercase : Optional[int] = make_duplicate_clusters(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Tuple = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} __lowercase : int = {} __lowercase : Dict = find_extremes(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for extremes in extremes_clusters: for element in extremes: __lowercase : Optional[Any] = element __lowercase : int = duplicate_indices - set(extreme_dict.keys() ) __lowercase : int = dataset.filter(lambda lowerCAmelCase_ , lowerCAmelCase_ : idx not in remove_indices , with_indices=lowerCAmelCase_ ) # update 
duplicate_clusters for cluster in duplicate_clusters: for element in cluster: __lowercase : List[str] = element["""base_index"""] in extreme_dict if element["is_extreme"]: __lowercase : str = extreme_dict[element["""base_index"""]]["""copies"""] print(F"Original dataset size: {len(lowerCAmelCase_ )}" ) print(F"Number of duplicate clusters: {len(lowerCAmelCase_ )}" ) print(F"Files in duplicate cluster: {len(lowerCAmelCase_ )}" ) print(F"Unique files in duplicate cluster: {len(lowerCAmelCase_ )}" ) print(F"Filtered dataset size: {len(lowerCAmelCase_ )}" ) return ds_filter, duplicate_clusters
306
0
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def snake_case_ ( lowerCAmelCase_ : str ): __lowercase : List[str] = [] embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight", F"stage{idx}.patch_embed.proj.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias", F"stage{idx}.patch_embed.proj.bias", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight", F"stage{idx}.patch_embed.norm.weight", ) ) embed.append( ( F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias", F"stage{idx}.patch_embed.norm.bias", ) ) return embed def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ): __lowercase : Optional[Any] = [] attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean", ) ) attention_weights.append( ( 
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight", 
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked", F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight", F"stage{idx}.blocks.{cnt}.attn.proj_q.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias", F"stage{idx}.blocks.{cnt}.attn.proj_q.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight", F"stage{idx}.blocks.{cnt}.attn.proj_k.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias", F"stage{idx}.blocks.{cnt}.attn.proj_k.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight", 
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias", F"stage{idx}.blocks.{cnt}.attn.proj_v.bias", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight", F"stage{idx}.blocks.{cnt}.attn.proj.weight", ) ) attention_weights.append( ( F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias", F"stage{idx}.blocks.{cnt}.attn.proj.bias", ) ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") ) attention_weights.append( (F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") ) return attention_weights def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ): __lowercase : str = [] token.append((F"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") ) return token def snake_case_ ( ): __lowercase : Any = [] head.append(("""layernorm.weight""", """norm.weight""") ) head.append(("""layernorm.bias""", """norm.bias""") ) head.append(("""classifier.weight""", """head.weight""") ) 
head.append(("""classifier.bias""", """head.bias""") ) return head def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any ): __lowercase : Optional[Any] = """imagenet-1k-id2label.json""" __lowercase : str = 1000 __lowercase : List[str] = """huggingface/label-files""" __lowercase : List[str] = num_labels __lowercase : Optional[int] = json.load(open(cached_download(hf_hub_url(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) ) , """r""" ) ) __lowercase : str = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} __lowercase : str = idalabel __lowercase : Dict = {v: k for k, v in idalabel.items()} __lowercase : List[Any] = CvtConfig(num_labels=lowerCAmelCase_ , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13": __lowercase : Optional[int] = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21": __lowercase : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __lowercase : Tuple = [2, 2, 20] __lowercase : str = [3, 12, 16] __lowercase : str = [192, 768, 1024] __lowercase : str = CvtForImageClassification(lowerCAmelCase_ ) __lowercase : Any = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" ) __lowercase : Tuple = image_size __lowercase : Optional[Any] = torch.load(lowerCAmelCase_ , map_location=torch.device("""cpu""" ) ) __lowercase : List[str] = OrderedDict() __lowercase : Optional[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __lowercase : str = list_of_state_dict + cls_token(lowerCAmelCase_ ) __lowercase : List[Any] = list_of_state_dict + embeddings(lowerCAmelCase_ ) for cnt in range(config.depth[idx] ): __lowercase : int = list_of_state_dict + attention(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Union[str, Any] = list_of_state_dict + final() for gg in 
list_of_state_dict: print(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): __lowercase : Optional[int] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) image_processor.save_pretrained(lowerCAmelCase_ ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_84, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCamelCase : int = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
366
from ...processing_utils import ProcessorMixin


# NOTE(review): identifiers in this chunk look machine-mangled: the class is
# named `lowerCAmelCase`, its base is `__a`, and most assignment targets are
# `__lowercase`. The bodies read names (`images`, `audio`, `image_processor`,
# `output_dict`, ...) that no visible assignment binds, so this block will not
# run as written — verify against the original TVLT processor source.
class lowerCAmelCase ( __a ):
    """Composite processor wrapping a TVLT image processor and a TVLT
    feature extractor behind a single callable object."""

    # Attribute names of the wrapped sub-processors and their expected classes.
    _A : List[str] = ['''image_processor''', '''feature_extractor''']
    _A : List[Any] = '''TvltImageProcessor'''
    _A : Optional[int] = '''TvltFeatureExtractor'''

    def __init__( self : str , __a : List[Any] , __a : Tuple ) -> Optional[Any]:
        """Register both sub-processors with the mixin and keep references."""
        super().__init__(image_processor=__a , feature_extractor=__a )
        # presumably binds self.image_processor / self.feature_extractor —
        # the targets are mangled to `__lowercase`; confirm against upstream.
        __lowercase : Union[str, Any] = image_processor
        __lowercase : Tuple = feature_extractor

    def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict:
        """Forward image-like inputs to the image processor and audio to the
        feature extractor, then merge their outputs into one dict."""
        # At least one modality is required.
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
        __lowercase : Tuple = None
        if images is not None:
            __lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a )
        if images_mixed is not None:
            # "mixed" images go through the same image processor in mixed mode.
            __lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a )
        if audio is not None:
            __lowercase : Optional[Any] = self.feature_extractor(
                __a , *__a , sampling_rate=__a , mask_audio=__a , **__a )
        # Merge whichever modality outputs were produced; later updates win on
        # key collisions.
        __lowercase : Tuple = {}
        if audio is not None:
            output_dict.update(__a )
        if images is not None:
            output_dict.update(__a )
        if images_mixed_dict is not None:
            output_dict.update(__a )
        return output_dict

    @property
    def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Combined, de-duplicated model input names of both sub-processors
        (dict.fromkeys preserves first-seen order)."""
        __lowercase : int = self.image_processor.model_input_names
        __lowercase : Union[str, Any] = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
306
0
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from 
.scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
367
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCAmelCase : '''simple docstring''' def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = parent __lowercase : int = batch_size __lowercase : Any = seq_length __lowercase : str = is_training __lowercase : str = use_input_mask __lowercase : Optional[int] = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = vocab_size __lowercase : int = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : Dict = num_attention_heads __lowercase : Any = intermediate_size __lowercase : Dict = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : List[str] = max_position_embeddings __lowercase : Union[str, Any] = type_vocab_size __lowercase : Dict = type_sequence_label_size __lowercase : Union[str, Any] = 
initializer_range __lowercase : List[Any] = num_labels __lowercase : str = num_choices __lowercase : Tuple = scope def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : int = None if self.use_input_mask: __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None __lowercase : Optional[Any] = None __lowercase : Tuple = None if self.use_labels: __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() __lowercase : str = model(__a , attention_mask=__a ) __lowercase : List[Any] = model(__a ) __lowercase : Optional[int] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowercase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase : Tuple = self.num_labels __lowercase : Any = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase : Any = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) : List[str] = config_and_inputs __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = False _A : Any = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _A : Optional[Any] = () _A : List[Any] = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) _A : Optional[Any] = True def 
lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = EsmModelTester(self ) __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase : Union[str, Any] = type self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : List[str] = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : List[str] = EsmEmbeddings(config=__a ) __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) __lowercase : int = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx ) 
self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : Optional[Any] = EsmEmbeddings(config=__a ) __lowercase : Optional[int] = torch.empty(2 , 4 , 30 ) __lowercase : Tuple = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @require_torch class lowerCAmelCase ( __a ): '''simple docstring''' @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase : List[str] = model(__a )[0] __lowercase : Union[str, Any] = 33 __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) __lowercase : List[Any] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) 
@slow def lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __lowercase : Any = model(__a )[0] # compare the actual values for a slice. __lowercase : int = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
306
0
def snake_case_(lowerCAmelCase_: int) -> int:
    """Return the number of set bits (population count) of a non-negative int.

    Uses Kernighan's trick: ``n &= n - 1`` clears the lowest set bit, so the
    loop runs once per *set* bit rather than once per bit position.

    Raises:
        ValueError: if the input is not a non-negative ``int``.
    """
    # Bug fix: the original checked isinstance(x, x), which always raises
    # TypeError; the intended check is against `int`. It also read unbound
    # names `number` / `count` — bind them explicitly here.
    if not isinstance(lowerCAmelCase_, int) or lowerCAmelCase_ < 0:
        raise ValueError("""Input must be a non-negative integer""")
    number = lowerCAmelCase_
    count = 0
    while number:
        # Clearing the lowest set bit each pass bounds the loop by
        # popcount(number), not bit_length(number).
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
368
def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number.

    ``n`` is pentagonal iff ``(1 + sqrt(1 + 24n)) / 6`` is an integer — the
    inverse of ``P(k) = k(3k - 1) / 2``.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find pentagonal numbers P(i) <= P(j) whose sum and
    difference are both pentagonal, and return that difference D = P(j) - P(i).

    Returns -1 when no such pair exists among the first ``limit - 1``
    pentagonal numbers.
    """
    # Bug fix: both functions were named `snake_case_` (the second shadowed
    # the first) while the bodies referenced `is_pentagonal`, `solution` and
    # `pentagonal_nums`, which were never defined — restore coherent names.
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


# Backward-compatible alias for the (machine-mangled) original name.
snake_case_ = solution


if __name__ == "__main__":
    print(f'''{solution() = }''')
306
0
from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class lowerCAmelCase : '''simple docstring''' _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : torch.Tensor # [batch_size x 3] _A : int _A : int _A : float _A : float _A : Tuple[int] def lowerCAmelCase ( self : int ) -> int: """simple docstring""" assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def lowerCAmelCase ( self : Dict ) -> torch.Tensor: """simple docstring""" __lowercase : Union[str, Any] = torch.arange(self.height * self.width ) __lowercase : int = torch.stack( [ pixel_indices % self.width, torch.div(__a , self.width , rounding_mode="""trunc""" ), ] , axis=1 , ) return coords @property def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = self.shape __lowercase : str = int(np.prod(__a ) ) __lowercase : Optional[Any] = self.get_image_coords() __lowercase : Dict = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) __lowercase : Dict = self.get_camera_rays(__a ) __lowercase : Union[str, Any] = rays.view(__a , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def lowerCAmelCase ( self : List[str] , __a : torch.Tensor ) -> torch.Tensor: """simple docstring""" __lowercase : Optional[int] = coords.shape assert n_coords == 2 assert batch_size == 
self.origin.shape[0] __lowercase : Optional[Any] = coords.view(__a , -1 , 2 ) __lowercase : Dict = self.resolution() __lowercase : Optional[Any] = self.fov() __lowercase : List[str] = (flat.float() / (res - 1)) * 2 - 1 __lowercase : Union[str, Any] = fracs * torch.tan(fov / 2 ) __lowercase : Any = fracs.view(__a , -1 , 2 ) __lowercase : Optional[int] = ( self.z.view(__a , 1 , 3 ) + self.x.view(__a , 1 , 3 ) * fracs[:, :, :1] + self.y.view(__a , 1 , 3 ) * fracs[:, :, 1:] ) __lowercase : Tuple = directions / directions.norm(dim=-1 , keepdim=__a ) __lowercase : Any = torch.stack( [ torch.broadcast_to(self.origin.view(__a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(__a , *__a , 2 , 3 ) def lowerCAmelCase ( self : Optional[int] , __a : int , __a : int ) -> "DifferentiableProjectiveCamera": """simple docstring""" assert width * self.height == height * self.width, "The aspect ratio should not change." return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=__a , height=__a , x_fov=self.x_fov , y_fov=self.y_fov , ) def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : Optional[int] = [] __lowercase : Dict = [] __lowercase : List[str] = [] __lowercase : Union[str, Any] = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): __lowercase : Optional[int] = np.array([np.sin(lowerCAmelCase_ ), np.cos(lowerCAmelCase_ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) __lowercase : int = -z * 4 __lowercase : Tuple = np.array([np.cos(lowerCAmelCase_ ), -np.sin(lowerCAmelCase_ ), 0.0] ) __lowercase : str = np.cross(lowerCAmelCase_ , lowerCAmelCase_ ) origins.append(lowerCAmelCase_ ) xs.append(lowerCAmelCase_ ) ys.append(lowerCAmelCase_ ) zs.append(lowerCAmelCase_ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) 
).float() , z=torch.from_numpy(np.stack(lowerCAmelCase_ , axis=0 ) ).float() , width=lowerCAmelCase_ , height=lowerCAmelCase_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase_ )) , )
369
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class lowerCAmelCase ( __a ):
    """Scheduler tests for ``DPMSolverSDEScheduler``.

    Each full-loop test runs a deterministic denoising loop and pins the
    sum/mean of the final sample per device (mps / cuda / cpu fallback).

    NOTE(review): identifiers in this file were mechanically obfuscated —
    locals are all named ``__lowercase`` while later statements reference the
    original names (``config``, ``scheduler_class``, ``sample`` …), and many
    call sites pass ``__a`` which is not bound in the enclosing method. As
    written these tests raise NameError; the original un-obfuscated test file
    is needed to restore them.
    """

    # Scheduler classes under test (consumed by SchedulerCommonTest helpers).
    _A : Optional[Any] = (DPMSolverSDEScheduler,)
    # Number of inference steps used by the full-loop tests.
    _A : Dict = 10

    def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]:
        """Return a default scheduler config, updated with any overrides."""
        __lowercase : Any = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }

        # NOTE(review): `config` is never bound (the dict above went to
        # `__lowercase`) — garbled by obfuscation.
        config.update(**__a )
        return config

    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Smoke-test config round-trips over several timestep counts."""
        for timesteps in [10, 50, 100, 1000]:
            # NOTE(review): presumably `num_train_timesteps=timesteps` originally.
            self.check_over_configs(num_train_timesteps=__a )

    def lowerCAmelCase ( self : Any ) -> Optional[int]:
        """Smoke-test config round-trips over beta_start/beta_end pairs."""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__a , beta_end=__a )

    def lowerCAmelCase ( self : str ) -> Optional[Any]:
        """Smoke-test config round-trips over beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__a )

    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """Smoke-test config round-trips over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Full denoising loop with the default (epsilon) config; pins sum/mean."""
        __lowercase : Optional[int] = self.scheduler_classes[0]
        __lowercase : List[str] = self.get_scheduler_config()
        __lowercase : Any = scheduler_class(**__a )

        scheduler.set_timesteps(self.num_inference_steps )

        __lowercase : Optional[Any] = self.dummy_model()
        __lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowercase : Optional[Any] = sample.to(__a )  # NOTE(review): likely `torch_device` originally

        for i, t in enumerate(scheduler.timesteps ):
            __lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a )

            __lowercase : Optional[Any] = model(__a , __a )

            __lowercase : Optional[Any] = scheduler.step(__a , __a , __a )
            __lowercase : str = output.prev_sample

        __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) )

        # Device-specific golden values (SDE sampling differs per backend RNG/kernels).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3

    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Full denoising loop with ``prediction_type="v_prediction"``; pins sum/mean."""
        __lowercase : Tuple = self.scheduler_classes[0]
        __lowercase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
        __lowercase : int = scheduler_class(**__a )

        scheduler.set_timesteps(self.num_inference_steps )

        __lowercase : Optional[int] = self.dummy_model()
        __lowercase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowercase : Dict = sample.to(__a )

        for i, t in enumerate(scheduler.timesteps ):
            __lowercase : Dict = scheduler.scale_model_input(__a , __a )

            __lowercase : Optional[int] = model(__a , __a )

            __lowercase : Optional[int] = scheduler.step(__a , __a , __a )
            __lowercase : int = output.prev_sample

        __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : List[str] = torch.mean(torch.abs(__a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3

    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Full denoising loop with timesteps placed on the target device."""
        __lowercase : Tuple = self.scheduler_classes[0]
        __lowercase : Dict = self.get_scheduler_config()
        __lowercase : Optional[int] = scheduler_class(**__a )

        scheduler.set_timesteps(self.num_inference_steps , device=__a )

        __lowercase : int = self.dummy_model()
        __lowercase : Optional[Any] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            __lowercase : int = scheduler.scale_model_input(__a , __a )

            __lowercase : List[str] = model(__a , __a )

            __lowercase : List[str] = scheduler.step(__a , __a , __a )
            __lowercase : int = output.prev_sample

        __lowercase : List[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3

    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Full denoising loop with Karras sigmas enabled; pins sum/mean."""
        __lowercase : str = self.scheduler_classes[0]
        __lowercase : List[Any] = self.get_scheduler_config()
        __lowercase : Tuple = scheduler_class(**__a , use_karras_sigmas=__a )

        scheduler.set_timesteps(self.num_inference_steps , device=__a )

        __lowercase : List[str] = self.dummy_model()
        __lowercase : Optional[int] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
        __lowercase : str = sample.to(__a )

        for t in scheduler.timesteps:
            __lowercase : List[Any] = scheduler.scale_model_input(__a , __a )

            __lowercase : Optional[Any] = model(__a , __a )

            __lowercase : Any = scheduler.step(__a , __a , __a )
            __lowercase : Optional[Any] = output.prev_sample

        __lowercase : Any = torch.sum(torch.abs(__a ) )
        __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
306
0
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class lowerCAmelCase ( __a ):
    """Fast tokenizer whose slow counterpart is ``CustomTokenizer``.

    The class only redefines the (obfuscated) class attribute ``_A`` to point
    at ``CustomTokenizer``; everything else is inherited from the base class.
    """

    # NOTE(review): annotated ``str`` but assigned a class object — annotation
    # looks wrong; kept as-is for interface compatibility.
    _A : str = CustomTokenizer
    # Removed a redundant trailing ``pass``: the class body already contains a
    # statement, so ``pass`` was dead code.
370
# Evaluate an ONNX-exported question-answering model with TensorRT: parse CLI
# arguments, build (and cache on disk) a TRT engine from the ONNX file, then
# run SQuAD-style evaluation further below.
# NOTE(review): identifiers were mechanically obfuscated (`lowerCamelCase`),
# so later references such as `parser`, `args`, `TRT_LOGGER`, `engine_name`
# rely on names this file never binds — the script cannot run as written.
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)

lowerCamelCase : Optional[Any] = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    '''--onnx_model_path''',
    default=None,
    type=str,
    required=True,
    help='''Path to ONNX model: ''',
)
parser.add_argument(
    '''--output_dir''',
    default=None,
    type=str,
    required=True,
    help='''The output directory where the model checkpoints and predictions will be written.''',
)

# Other parameters
parser.add_argument(
    '''--tokenizer_name''',
    default='''''',
    type=str,
    required=True,
    help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
    '''--version_2_with_negative''',
    action='''store_true''',
    help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
    '''--null_score_diff_threshold''',
    type=float,
    default=0.0,
    help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
    '''--max_seq_length''',
    default=3_84,
    type=int,
    help=(
        '''The maximum total input sequence length after WordPiece tokenization. Sequences '''
        '''longer than this will be truncated, and sequences shorter than this will be padded.'''
    ),
)
parser.add_argument(
    '''--doc_stride''',
    default=1_28,
    type=int,
    help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
    '''--n_best_size''',
    default=20,
    type=int,
    help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
    '''--max_answer_length''',
    default=30,
    type=int,
    help=(
        '''The maximum length of an answer that can be generated. This is needed because the start '''
        '''and end predictions are not conditioned on one another.'''
    ),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
    '''--dataset_name''',
    type=str,
    default=None,
    required=True,
    help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--dataset_config_name''',
    type=str,
    default=None,
    help='''The configuration name of the dataset to use (via the datasets library).''',
)
# NOTE(review): help text below looks copy-pasted from a file argument; this
# flag is actually the number of preprocessing workers.
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
    '''--fp16''',
    action='''store_true''',
    help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
    '''--int8''',
    action='''store_true''',
    help='''Whether to use INT8''',
)
lowerCamelCase : Dict = parser.parse_args()

if args.tokenizer_name:
    lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
    )

logger.info('''Training/evaluation parameters %s''', args)

lowerCamelCase : List[str] = args.per_device_eval_batch_size
lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
lowerCamelCase : List[str] = True

lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
    lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
    lowerCamelCase : int = '''temp_engine/bert-int8.engine'''

# import ONNX file
if not os.path.exists('''temp_engine'''):
    os.makedirs('''temp_engine''')

lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, '''rb''') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
    lowerCamelCase : Dict = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        lowerCamelCase : List[str] = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        lowerCamelCase : Optional[int] = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        lowerCamelCase : Optional[Any] = builder.build_engine(network, config)

# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
    f.write(engine.serialize())


def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
    """Run one TensorRT inference: copy the three encoder inputs host->device,
    execute asynchronously, copy the start/end logits device->host, and return
    ``(outputs, inference_wall_time)``.

    NOTE(review): all eight parameters share the obfuscated name
    ``lowerCAmelCase_`` and the body references the lost originals
    (``inputs``, ``d_inputs``, ``context``, ``stream`` …) — garbled.
    """
    __lowercase : List[str] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
    __lowercase : Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
    __lowercase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCAmelCase_ )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCAmelCase_ )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCAmelCase_ )
    # start time
    __lowercase : Optional[Any] = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(lowerCAmelCase_ ) for d_inp in d_inputs] + [int(lowerCAmelCase_ ), int(lowerCAmelCase_ )] , stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    __lowercase : int = time.time()
    __lowercase : Union[str, Any] = end_time - start_time
    __lowercase : Any = (h_outputa, h_outputa)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
# Only the local main process logs at INFO; other ranks are silenced.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names

# Resolve the question/context/answers column names, falling back to
# positional columns when the standard SQuAD names are absent.
lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Dict = tokenizer.padding_side == '''right'''

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )

lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)


def snake_case_ ( lowerCAmelCase_ : int ):
    """Tokenize validation examples into (possibly overlapping) features,
    recording per-feature example ids and context-only offset mappings.

    NOTE(review): body references names lost to obfuscation (``examples``,
    ``sample_mapping`` …) and passes ``lowerCAmelCase_`` where per-call values
    (``max_seq_length``, ``i``, ``True``) were clearly intended — garbled.
    """
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    __lowercase : str = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    __lowercase : List[str] = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] ,
        examples[context_column_name if pad_on_right else question_column_name] ,
        truncation="""only_second""" if pad_on_right else """only_first""" ,
        max_length=lowerCAmelCase_ ,
        stride=args.doc_stride ,
        return_overflowing_tokens=lowerCAmelCase_ ,
        return_offsets_mapping=lowerCAmelCase_ ,
        padding="""max_length""" ,
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    __lowercase : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" )

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    __lowercase : Any = []

    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        __lowercase : Dict = tokenized_examples.sequence_ids(lowerCAmelCase_ )
        __lowercase : List[Any] = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        __lowercase : List[str] = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        __lowercase : Dict = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]

    return tokenized_examples


lowerCamelCase : Tuple = raw_datasets['''validation''']

# Validation Feature Creation
lowerCamelCase : Optional[int] = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)

lowerCamelCase : Union[str, Any] = default_data_collator

# The model never sees the bookkeeping columns — strip them for the DataLoader.
lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase : List[str] = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict="eval" ):
    """Convert raw start/end logits into answer-text predictions and package
    them as an ``EvalPrediction`` for the squad metric.
    """
    # Post-processing: we match the start logits and end logits to answers in the original context.
    __lowercase : int = postprocess_qa_predictions(
        examples=lowerCAmelCase_ ,
        features=lowerCAmelCase_ ,
        predictions=lowerCAmelCase_ ,
        version_2_with_negative=args.version_2_with_negative ,
        n_best_size=args.n_best_size ,
        max_answer_length=args.max_answer_length ,
        null_score_diff_threshold=args.null_score_diff_threshold ,
        output_dir=args.output_dir ,
        prefix=lowerCAmelCase_ ,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        __lowercase : Optional[int] = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        __lowercase : List[Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]

    __lowercase : Optional[int] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=lowerCAmelCase_ , label_ids=lowerCAmelCase_ )


lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')

# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def snake_case_ ( lowerCAmelCase_ : str ):
        """Return the byte size of one engine binding (volume * dtype size)."""
        return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize

    # Allocate device memory for inputs and outputs.
    lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes)
    lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    lowerCamelCase : Optional[int] = cuda.Stream()

    # Evaluation
    logger.info('''***** Running Evaluation *****''')
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')

    lowerCamelCase : int = 0.0
    lowerCamelCase : List[str] = 0
    lowerCamelCase : List[str] = timeit.default_timer()

    lowerCamelCase : List[Any] = None
    for step, batch in enumerate(eval_dataloader):
        lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1

        lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs
        lowerCamelCase : Optional[Any] = torch.tensor(start_logits)
        lowerCamelCase : List[str] = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)

        lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)

    if all_preds is not None:
        lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset))

    lowerCamelCase : Dict = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
    logger.info('''Total Number of Inference = %d''', niter)

lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds)

lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
306
0
"""simple docstring""" from __future__ import annotations from math import pi def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ): if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if inductance < 0: raise ValueError("""Inductance cannot be negative""" ) if frequency < 0: raise ValueError("""Frequency cannot be negative""" ) if reactance < 0: raise ValueError("""Inductive reactance cannot be negative""" ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
371
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

lowerCamelCase : str = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}


class lowerCAmelCase ( __a ):
    """Configuration class for an NLLB-MoE model (``model_type='nllb-moe'``).

    NOTE(review): this file was mechanically obfuscated — every ``__init__``
    parameter is named ``__a`` (duplicate arguments: a SyntaxError) and every
    ``self.<attr> = value`` assignment became ``__lowercase : T = value``, so
    the attribute names are lost. The right-hand sides below still carry the
    original parameter names, which documents the intended signature order.
    """

    _A : int = '''nllb-moe'''
    _A : List[str] = ['''past_key_values''']
    # Maps common config aliases onto this model's native attribute names.
    _A : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self : Dict , __a : List[str]=128112 , __a : List[Any]=1024 , __a : List[Any]=12 , __a : Union[str, Any]=4096 , __a : List[str]=16 , __a : int=12 , __a : Optional[int]=4096 , __a : str=16 , __a : List[Any]=0.05 , __a : Any=0.05 , __a : Dict=True , __a : Optional[Any]=True , __a : List[Any]="relu" , __a : Tuple=1024 , __a : Optional[Any]=0.1 , __a : Tuple=0.1 , __a : Any=0.0 , __a : Optional[Any]=0.02 , __a : List[str]=2 , __a : Union[str, Any]=True , __a : List[Any]=False , __a : Tuple="float32" , __a : Optional[int]=False , __a : Optional[int]=128 , __a : str=64 , __a : Dict=4 , __a : str=4 , __a : List[str]=0.001 , __a : List[Any]=0.001 , __a : Optional[Any]="all" , __a : Optional[int]=False , __a : int=False , __a : int=1.0 , __a : Dict=0.2 , __a : Tuple=1 , __a : Optional[Any]=0 , __a : List[Any]=2 , __a : Any=False , **__a : Any , ) -> Any:
        """Initialize model-size, dropout, and Mixture-of-Experts routing
        hyper-parameters, then delegate token ids / encoder-decoder flags to
        the base class.
        """
        # Transformer backbone dimensions and regularization.
        __lowercase : int = vocab_size
        __lowercase : List[Any] = max_position_embeddings
        __lowercase : Tuple = d_model
        __lowercase : str = encoder_ffn_dim
        __lowercase : List[str] = encoder_layers
        __lowercase : int = encoder_attention_heads
        __lowercase : List[Any] = decoder_ffn_dim
        __lowercase : int = decoder_layers
        __lowercase : Optional[int] = decoder_attention_heads
        __lowercase : Union[str, Any] = dropout
        __lowercase : str = attention_dropout
        __lowercase : Any = activation_dropout
        __lowercase : List[Any] = activation_function
        __lowercase : List[str] = init_std
        __lowercase : Optional[int] = encoder_layerdrop
        __lowercase : str = decoder_layerdrop
        __lowercase : Dict = use_cache
        # Original code exposed encoder_layers a second time as num_hidden_layers.
        __lowercase : Optional[Any] = encoder_layers
        __lowercase : str = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-Experts routing hyper-parameters.
        __lowercase : List[Any] = router_z_loss_coef
        __lowercase : Tuple = router_aux_loss_coef
        __lowercase : str = decoder_sparse_step
        __lowercase : Any = encoder_sparse_step
        __lowercase : str = num_experts
        __lowercase : List[Any] = expert_capacity
        __lowercase : int = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        __lowercase : Optional[int] = router_dtype

        __lowercase : Any = router_ignore_padding_tokens
        __lowercase : Optional[Any] = batch_prioritized_routing
        __lowercase : str = second_expert_policy
        __lowercase : List[str] = normalize_router_prob_before_dropping
        __lowercase : List[Any] = moe_eval_capacity_token_fraction
        __lowercase : List[str] = moe_token_dropout
        __lowercase : Optional[Any] = output_router_logits
        super().__init__(
            pad_token_id=__a ,
            bos_token_id=__a ,
            eos_token_id=__a ,
            is_encoder_decoder=__a ,
            decoder_start_token_id=__a ,
            **__a ,
        )
306
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : str = { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''', } class lowerCAmelCase ( __a ): '''simple docstring''' _A : Union[str, Any] = '''lxmert''' _A : Tuple = {} def __init__( self : Tuple , __a : Dict=30522 , __a : Union[str, Any]=768 , __a : Optional[int]=12 , __a : Optional[Any]=9500 , __a : Optional[Any]=1600 , __a : int=400 , __a : List[str]=3072 , __a : Union[str, Any]="gelu" , __a : List[str]=0.1 , __a : str=0.1 , __a : Any=512 , __a : List[Any]=2 , __a : Union[str, Any]=0.02 , __a : Dict=1E-12 , __a : Optional[Any]=9 , __a : Any=5 , __a : Any=5 , __a : int=2048 , __a : Dict=4 , __a : List[str]=6.67 , __a : Any=True , __a : List[Any]=True , __a : Union[str, Any]=True , __a : int=True , __a : List[Any]=True , __a : List[Any]=True , __a : int=True , **__a : List[Any] , ) -> Dict: """simple docstring""" __lowercase : List[Any] = vocab_size __lowercase : Tuple = hidden_size __lowercase : List[Any] = num_attention_heads __lowercase : List[str] = hidden_act __lowercase : Optional[int] = intermediate_size __lowercase : List[Any] = hidden_dropout_prob __lowercase : Any = attention_probs_dropout_prob __lowercase : List[str] = max_position_embeddings __lowercase : Optional[int] = type_vocab_size __lowercase : List[Any] = initializer_range __lowercase : Dict = layer_norm_eps __lowercase : List[str] = num_qa_labels __lowercase : List[str] = num_object_labels __lowercase : List[str] = num_attr_labels __lowercase : Tuple = l_layers __lowercase : str = x_layers __lowercase : List[Any] = r_layers __lowercase : List[Any] = visual_feat_dim __lowercase : Optional[int] = visual_pos_dim __lowercase : str = visual_loss_normalizer __lowercase : int = task_matched __lowercase : int = task_mask_lm __lowercase : Optional[Any] = task_obj_predict 
__lowercase : List[Any] = task_qa __lowercase : Tuple = visual_obj_loss __lowercase : Tuple = visual_attr_loss __lowercase : Optional[Any] = visual_feat_loss __lowercase : Dict = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers} super().__init__(**__a )
350
# Lazy import structure for the PoolFormer model: heavy submodules are only
# imported on attribute access (via ``_LazyModule``) unless type checking.
# NOTE(review): obfuscation collapsed the module-level names
# (``_import_structure`` and friends) into ``lowerCamelCase`` rebinds, so the
# dict/list updates below no longer target the structure that ``_LazyModule``
# receives at the bottom of the file.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowerCamelCase : Optional[Any] = {
    '''configuration_poolformer''': [
        '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''PoolFormerConfig''',
        '''PoolFormerOnnxConfig''',
    ]
}

# Vision-dependent processors are only registered when PIL/vision extras exist.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase : int = ['''PoolFormerFeatureExtractor''']
    lowerCamelCase : Union[str, Any] = ['''PoolFormerImageProcessor''']

# Torch-dependent modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase : List[str] = [
        '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PoolFormerForImageClassification''',
        '''PoolFormerModel''',
        '''PoolFormerPreTrainedModel''',
    ]


# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on first access.
    lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
306
0
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of the perimeters (not exceeding
    ``max_perimeter``) of all "almost equilateral" triangles — integer
    sides (a, a, a±1) — that have an integral area.

    Walks the recurrence generating successive solutions instead of
    brute-forcing all triangles, so the loop runs O(log max_perimeter)
    times.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Alternate between the two solution families:
        # sides (a, a, a + 1) and (a, a, a - 1).
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
351
from __future__ import annotations


def snake_case_(lowerCAmelCase_: int) -> list[int]:
    """Return the prime factorisation of ``lowerCAmelCase_`` in ascending order.

    Trial division up to sqrt(n); whatever remains above 1 is itself prime.
    """
    n = lowerCAmelCase_
    i = 2
    factors: list[int] = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            # Append the divisor found, not the original argument
            # (previous version appended the untouched input value).
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
306
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """Configuration for the Donut Swin vision encoder."""

    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The previous version bound every value to a shadowed local
        # instead of an attribute; assign to ``self`` as intended.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
352
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) __lowercase : List[str] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" __lowercase : Optional[Any] = model(__a )["""last_hidden_state"""] __lowercase : Any = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , __a ) # compare the actual values for a slice. __lowercase : Dict = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
306
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy import table: module name -> public symbols. Backend-specific
# entries are appended only when the backend is importable.
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Previous version wrongly imported the Flax classes from the TF
        # module; they live in ``modeling_flax_wav2vec2``.
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
353
def snake_case_(a: str, b: str) -> bool:
    """Return True if string ``a`` can be abbreviated to string ``b``.

    ``a`` matches ``b`` when uppercasing some of ``a``'s lowercase letters
    and deleting all remaining lowercase letters yields ``b`` exactly.
    Classic DP: dp[i][j] is True when a[:i] can produce b[:j].
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # consume a[i] as the (possibly uppercased) b[j]
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # delete the lowercase letter a[i]
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
306
0
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig


class TimmBackboneModelTester:
    """Builds small configs and dummy inputs for the TimmBackbone tests."""

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        # Default to the last stage only, mirroring TimmBackbone's default.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_registration(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
354
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Wraps ``scipy.stats.spearmanr`` as a ``datasets`` metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # spearmanr is symmetric in its two inputs, so argument order does
        # not affect the coefficient.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
306
0
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first ``n`` natural numbers.

    Uses the closed-form identities so the result is O(1), then truncates
    the (exactly integral) float back to int.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
355
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth–Morris–Pratt: return True if ``pattern`` occurs in ``text``.

    Runs in O(len(pattern) + len(text)) by precomputing the failure
    (longest proper prefix that is also a suffix) table.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure table: failure[k] is the length of the
    longest proper prefix of pattern[: k + 1] that is also its suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
306
0
def snake_case_(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound (m/s) in a fluid, c = sqrt(K / rho),
    from its density rho (kg/m^3) and adiabatic bulk modulus K (Pa).

    Raises:
        ValueError: if density or bulk modulus is not strictly positive.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
356
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Image processor for CLIP: shortest-edge resize, center crop,
    rescale to [0, 1], channel-wise normalisation, optional RGB conversion.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        # The previous version bound these to shadowed locals instead of
        # attributes, so the processor stored no configuration.
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"],
        preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalise: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the configured pipeline over one image or a batch; any
        argument left as None falls back to the instance setting."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
306
0
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """Thin wrapper around ``RagRetriever`` meant to run inside a Ray actor.

    The retriever is created lazily via ``create_rag_retriever`` so the actor
    can be spawned before the (potentially large) index is loaded.
    """

    def __init__(self):
        # Set once create_rag_retriever has run; guards double-initialization.
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Instantiate the underlying RagRetriever inside this actor (idempotent)."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,  # index is initialized explicitly later
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index into this actor's memory."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run retrieval for a batch of question embeddings.

        Returns ``(doc_ids, retrieved_doc_embeds)`` as produced by
        ``RagRetriever._main_retrieve``.
        """
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """A distributed retriever built on Ray actors.

    Each training worker sends retrieval requests to a randomly chosen
    ``RayRetriever`` actor, so the index is loaded only once per actor
    instead of once per training process. With zero workers it degrades to
    plain in-process retrieval.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Create the retriever object inside every remote actor up front.
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        """Initialize the index — remotely on every worker, or locally if none."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve docs for a batch, dispatching to a random worker if any.

        Returns ``(retrieved_doc_embeds, doc_ids, doc_dicts)``.
        """
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor to spread the load.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a distributed retriever from a pretrained checkpoint.

        ``actor_handles`` are the Ray ``RayRetriever`` actors; ``indexed_dataset``
        optionally supplies a custom pre-built index.
        """
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
357
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` matches of `old` in `s` with `new`."""
    split = s.rsplit(old, occurrence)
    return new.join(split)


def count_parameters(state_dict):
    """Sum all parameter values of a state dict (scalar tensor).

    encoder.embeddings are double copied in original FLAVA, so they are
    excluded to keep the checksum comparable between checkpoints.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename dall-e encoder keys to the FLAVA image-codebook naming scheme."""
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        # Only the final ".w"/".b" suffix is a weight/bias marker, hence rreplace.
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a dall-e Encoder checkpoint into a FlavaImageCodebook.

    `checkpoint_path` may be a local file or a URL; the converted model is
    saved to `pytorch_dump_folder_path` unless `save_checkpoint` is False,
    in which case the upgraded state dict is returned instead.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Parameter-sum checksum guards against silently dropped/renamed weights.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
306
0
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ): __lowercase : Optional[Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def snake_case_ ( ): print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
358
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy weights from an old-structure ProphetNet checkpoint into the new model.

    Every key reported missing when loading the checkpoint into the new model
    is resolved by walking the old model with a name-mapping table and copying
    the corresponding tensor over. The converted model is saved to
    `pytorch_dump_folder_path`.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections are packed into a single in_proj tensor in the old
    # model and must be split into q/k/v in the new one.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means: stay on the same module)
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old model packs q/k/v into one [3*embed_dim, embed_dim] tensor.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # NOTE: upstream had these two comparisons as bare no-op
                # expressions; the attached messages show an assert was intended.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level on both models towards the leaf parameter.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
306
0
"""simple docstring""" def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : str = generate_pascal_triangle(lowerCAmelCase_ ) for row_idx in range(lowerCAmelCase_ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def snake_case_ ( lowerCAmelCase_ : int ): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) __lowercase : list[list[int]] = [] for current_row_idx in range(lowerCAmelCase_ ): __lowercase : Optional[Any] = populate_current_row(lowerCAmelCase_ , lowerCAmelCase_ ) triangle.append(lowerCAmelCase_ ) return triangle def snake_case_ ( lowerCAmelCase_ : list[list[int]] , lowerCAmelCase_ : int ): __lowercase : List[Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 __lowercase : int = 1, 1 for current_col_idx in range(1 , lowerCAmelCase_ ): calculate_current_element( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return current_row def snake_case_ ( lowerCAmelCase_ : list[list[int]] , lowerCAmelCase_ : list[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , ): __lowercase : Tuple = triangle[current_row_idx - 1][current_col_idx - 1] __lowercase : Optional[Any] = triangle[current_row_idx - 1][current_col_idx] __lowercase : Tuple = above_to_left_elt + above_to_right_elt def snake_case_ ( lowerCAmelCase_ : int ): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 
0""" ) __lowercase : list[list[int]] = [[1]] for row_index in range(1 , lowerCAmelCase_ ): __lowercase : Optional[int] = [0] + result[-1] + [0] __lowercase : Any = row_index + 1 # Calculate the number of distinct elements in a row __lowercase : List[str] = sum(divmod(lowerCAmelCase_ , 2 ) ) __lowercase : List[Any] = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] __lowercase : int = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() __lowercase : str = row_first_half + row_second_half result.append(lowerCAmelCase_ ) return result def snake_case_ ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCAmelCase_ : Callable , lowerCAmelCase_ : int ) -> None: __lowercase : Union[str, Any] = F"{func.__name__}({value})" __lowercase : Any = timeit(F"__main__.{call}" , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F"{call:38} -- {timing:.4f} seconds" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCAmelCase_ , lowerCAmelCase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
359
def snake_case_ ( lowerCAmelCase_ : int = 200 ): __lowercase : List[str] = [1, 2, 5, 10, 20, 50, 100, 200] __lowercase : List[str] = [0] * (pence + 1) __lowercase : Optional[Any] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(lowerCAmelCase_ , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(2_00) == 7_36_82
306
0
from manim import *


class Stage1(Scene):
    """Animate an empty model skeleton being loaded into CPU memory.

    Draws CPU / GPU / Model blocks, then moves yellow markers from the
    model cells onto the CPU cells to illustrate that the skeleton barely
    uses any RAM.

    NOTE(review): the direction constants (UP/DOWN/RIGHT) and fill colors
    below were erased by the obfuscation in the original dump and are
    reconstructed from the published animation — confirm against the
    rendered video.
    """

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key), Write(key_text))

        self.add(key)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            # A small yellow marker starts on each model cell ...
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            # ... and shrinks so several fit inside one CPU cell.
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
360
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    """Builds small configs/inputs for TimmBackbone model tests."""

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
306
0
def snake_case_ ( lowerCAmelCase_ : int ): assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), F"The input value of [n={number}] is not an integer" if number == 1: return 2 elif number < 1: __lowercase : Tuple = F"The input value of [n={number}] has to be > 0" raise ValueError(lowerCAmelCase_ ) else: __lowercase : Optional[int] = sylvester(number - 1 ) __lowercase : Optional[Any] = num - 1 __lowercase : Dict = num return lower * upper + 1 if __name__ == "__main__": print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
361
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : str = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': 
'''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } lowerCamelCase : Optional[Any] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int ): for attribute in key.split(""".""" ): __lowercase : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if weight_type is not None: __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape else: __lowercase : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowercase : Dict = value elif weight_type == "weight_g": __lowercase : Union[str, Any] = value elif weight_type == "weight_v": __lowercase : List[Any] = value elif weight_type == "bias": __lowercase : int = value elif weight_type == "running_mean": __lowercase : List[Any] = value elif weight_type == "running_var": __lowercase : int = value elif weight_type == "num_batches_tracked": __lowercase : int = value elif weight_type == "inv_freq": __lowercase : Optional[Any] = value else: __lowercase : Any = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ): __lowercase : str = [] __lowercase : Any = fairseq_model.state_dict() __lowercase : List[str] = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Optional[Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : Any = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __lowercase : Tuple = True if "*" in mapped_key: __lowercase : List[Any] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2] __lowercase : Any = mapped_key.replace("""*""" , lowerCAmelCase_ ) if "pos_bias_u" in name: __lowercase : Any = None elif "pos_bias_v" in name: __lowercase : Tuple = None elif "weight_g" in name: __lowercase : Union[str, Any] = """weight_g""" elif "weight_v" in name: __lowercase : Dict = """weight_v""" elif "bias" in name: __lowercase : Union[str, Any] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowercase : str = """weight""" elif "running_mean" in name: __lowercase : str = """running_mean""" elif "inv_freq" in name: __lowercase : List[Any] = """inv_freq""" elif "running_var" in name: __lowercase : Any = """running_var""" elif "num_batches_tracked" in name: __lowercase : Any = """num_batches_tracked""" else: __lowercase : Optional[int] = None set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(F"Unused weights: {unused_weights}" ) def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , 
lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ): __lowercase : List[Any] = full_name.split("""conv_layers.""" )[-1] __lowercase : int = name.split(""".""" ) __lowercase : Optional[Any] = int(items[0] ) __lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowercase : List[str] = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __lowercase : Union[str, Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __lowercase : Dict = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ): if config_path is not None: __lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" ) else: __lowercase : List[Any] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __lowercase : Tuple = """rotary""" if is_finetuned: if dict_path: __lowercase : Any = Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : List[Any] = target_dict.pad_index __lowercase : Optional[int] = target_dict.bos_index __lowercase : List[Any] = target_dict.eos_index __lowercase : List[str] = len(target_dict.symbols ) __lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" ) if not os.path.isdir(lowerCAmelCase_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) __lowercase : Tuple = target_dict.indices # fairseq has the <pad> and <s> switched __lowercase : int = 0 __lowercase : Any = 1 with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Dict = WavaVecaCTCTokenizer( lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , ) __lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False __lowercase : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) __lowercase : Optional[int] = 
WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) __lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ ) else: __lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ ) if is_finetuned: __lowercase , __lowercase , __lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" ) __lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ ) __lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ ) __lowercase : Dict = model[0].eval() recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCamelCase : Any = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
306
0
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class lowerCAmelCase : '''simple docstring''' def __init__( self : List[str] , __a : str , __a : int=13 , __a : Tuple=7 , __a : Optional[Any]=True , __a : str=True , __a : Union[str, Any]=False , __a : Union[str, Any]=True , __a : Tuple=99 , __a : Optional[int]=64 , __a : Dict=5 , __a : str=4 , __a : Tuple=64 , __a : Dict="gelu" , __a : Optional[Any]=0.1 , __a : Any=0.1 , __a : str=512 , __a : Dict=16 , __a : List[str]=2 , __a : List[Any]=0.02 , __a : Union[str, Any]=3 , __a : Optional[Any]=4 , __a : Optional[Any]=None , ) -> str: """simple docstring""" __lowercase : Tuple = parent __lowercase : str = batch_size __lowercase : Union[str, Any] = seq_length __lowercase : Any = is_training __lowercase : List[Any] = use_input_mask __lowercase : Optional[Any] = use_token_type_ids __lowercase : Union[str, Any] = use_labels __lowercase : int = vocab_size __lowercase : Optional[int] = hidden_size __lowercase : str = num_hidden_layers __lowercase : int = num_attention_heads __lowercase : str = intermediate_size __lowercase : str = hidden_act __lowercase : Dict = hidden_dropout_prob __lowercase : int = attention_probs_dropout_prob __lowercase : int = max_position_embeddings __lowercase : Dict = type_vocab_size __lowercase : List[Any] = type_sequence_label_size __lowercase : Optional[Any] = initializer_range __lowercase : Any = num_labels __lowercase : List[Any] = num_choices __lowercase : Union[str, Any] = scope def lowerCAmelCase ( 
self : List[Any] ) -> Union[str, Any]: """simple docstring""" return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Tuple = None if self.use_input_mask: __lowercase : str = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None __lowercase : Optional[int] = None __lowercase : str = None if self.use_labels: __lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : int , __a : Any , __a : Tuple , __a : List[str] , __a : Optional[Any] , __a : List[str] , __a : List[Any] ) -> str: """simple docstring""" __lowercase : Optional[Any] = MPNetModel(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , __a ) __lowercase : List[Any] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def 
lowerCAmelCase ( self : str , __a : Tuple , __a : List[Any] , __a : List[Any] , __a : Optional[Any] , __a : List[str] , __a : Optional[int] ) -> Tuple: """simple docstring""" __lowercase : Optional[int] = MPNetForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model( __a , attention_mask=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : List[Any] , __a : List[Any] , __a : List[Any] , __a : str , __a : Any , __a : Tuple , __a : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = self.num_labels __lowercase : Optional[int] = MPNetForSequenceClassification(__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : str , __a : List[str] , __a : Dict , __a : Any , __a : Dict , __a : List[Any] , __a : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : int = self.num_choices __lowercase : str = MPNetForMultipleChoice(config=__a ) model.to(__a ) model.eval() __lowercase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase : List[str] = model( __a , attention_mask=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Optional[Any] , __a : List[str] , __a : Any , __a : Tuple ) -> Any: """simple docstring""" __lowercase : Any = self.num_labels __lowercase : Any = MPNetForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowercase : Union[str, Any] = model(__a , 
attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" __lowercase : Any = self.prepare_config_and_inputs() (__lowercase) : Union[str, Any] = config_and_inputs __lowercase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : str = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) _A : Any = ( { '''feature-extraction''': MPNetModel, '''fill-mask''': MPNetForMaskedLM, '''question-answering''': MPNetForQuestionAnswering, '''text-classification''': MPNetForSequenceClassification, '''token-classification''': MPNetForTokenClassification, '''zero-shot''': MPNetForSequenceClassification, } if is_torch_available() else {} ) _A : str = False _A : Dict = True def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : List[Any] = MPNetModelTester(self ) __lowercase : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 ) def lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__a ) def lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" __lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase : Tuple = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__a ) def lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" __lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__a ) def lowerCAmelCase ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__a ) @require_torch class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase : int = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) __lowercase : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __lowercase : Union[str, Any] = model(__a )[0] __lowercase : List[str] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __a ) __lowercase : List[str] = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
362
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ): if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError("""String lengths must match!""" ) __lowercase : str = 0 for chara, chara in zip(lowerCAmelCase_ , lowerCAmelCase_ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
306
0
def snake_case_ ( lowerCAmelCase_ : int ): if num <= 0: raise ValueError("""Input must be a positive integer""" ) __lowercase : str = [True] * (num + 1) __lowercase : Optional[int] = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , lowerCAmelCase_ ): __lowercase : Optional[Any] = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase : Any = int(input('''Enter a positive integer: ''').strip()) print(prime_sieve_eratosthenes(user_num))
363
import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def snake_case_ ( lowerCAmelCase_ : Tuple ): if isinstance(lowerCAmelCase_ , collections.abc.Iterable ): return x return (x, x) @require_flax class lowerCAmelCase : '''simple docstring''' def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]: """simple docstring""" pass def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" pass def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]: """simple docstring""" __lowercase : List[str] = np.abs((a - b) ).max() self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." 
) def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str: """simple docstring""" __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a ) __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = output[0] with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(__a ) __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a ) __lowercase : int = after_output[0] __lowercase : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-3 ) def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a ) __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model} __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a ) __lowercase : Union[str, Any] = model( input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a ) __lowercase : Optional[int] = output.vision_model_output.attentions self.assertEqual(len(__a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase : Optional[int] = to_atuple(vision_model.config.image_size ) __lowercase : List[str] = to_atuple(vision_model.config.patch_size ) __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase : Dict = output.text_model_output.attentions self.assertEqual(len(__a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]: """simple docstring""" pt_model.to(__a ) pt_model.eval() # prepare inputs __lowercase : Union[str, Any] = inputs_dict __lowercase : List[Any] = {k: 
torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple() __lowercase : Tuple = fx_model(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__a ) __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a ) __lowercase : Dict = fx_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__a ) __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a ) pt_model_loaded.to(__a ) pt_model_loaded.eval() with torch.no_grad(): __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : str = VisionTextDualEncoderModel(__a ) __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a ) __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a ) __lowercase : Any = fx_state self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : Any , 
__a : Any , __a : Dict , __a : Tuple ) -> str: """simple docstring""" __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a ) __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a ) __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a ) __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params ) self.check_pt_flax_equivalence(__a , __a , __a ) def lowerCAmelCase ( self : str ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : int = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__a ) def lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" __lowercase : List[str] = self.prepare_config_and_inputs() self.check_save_load(**__a ) def lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" __lowercase : str = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__a ) @is_pt_flax_cross_test def lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" __lowercase : Optional[Any] = self.prepare_config_and_inputs() __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" ) __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" ) __lowercase : Dict = config_inputs_dict self.check_equivalence_pt_to_flax(__a , __a , __a ) self.check_equivalence_flax_to_pt(__a , __a , __a ) @slow def lowerCAmelCase ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs() __lowercase : Dict = model_a(**__a ) __lowercase : Any = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__a ) __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a ) __lowercase : Optional[int] = 
model_a(**__a ) __lowercase : Tuple = after_outputs[0] __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__a , 1E-5 ) @require_flax class lowerCAmelCase ( __a , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : int = 13 __lowercase : Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : Tuple = random_attention_mask([batch_size, 4] ) __lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict: """simple docstring""" __lowercase : int = FlaxViTModel(__a ) __lowercase : List[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase : Tuple = FlaxViTModelTester(self ) __lowercase : str = FlaxBertModelTester(self ) __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs() __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Optional[int] = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowerCAmelCase ( __a 
, unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , ) __lowercase : Tuple = 13 __lowercase : Optional[Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) __lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) __lowercase : List[Any] = random_attention_mask([batch_size, 4] ) __lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any: """simple docstring""" __lowercase : Dict = FlaxCLIPVisionModel(__a ) __lowercase : Optional[Any] = FlaxBertModel(__a ) return vision_model, text_model def lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase : List[Any] = FlaxCLIPVisionModelTester(self ) __lowercase : Optional[Any] = FlaxBertModelTester(self ) __lowercase : Any = clip_model_tester.prepare_config_and_inputs() __lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase : Dict = vision_config_and_inputs __lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase ( self : List[str] ) -> Optional[Any]: """simple docstring""" 
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 ) __lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __lowercase : Tuple = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" ) __lowercase : Optional[int] = model(**__a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
306
0
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:
    # Stub so the `Image` annotations below still resolve when PIL is absent.
    # (Original stub was named `lowerCAmelCase`, leaving `Image` undefined.)
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    """Return a short, stable fingerprint of an image's raw bytes.

    Lets the tests compare segmentation masks against hard-coded
    expectations without embedding full arrays.
    """
    # Bug fix: `hashlib.mda` does not exist -- the intended digest is MD5.
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    """Summarize a mask as ``{"hash": ..., "shape": ...}`` for compact assertions."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase(unittest.TestCase):
    """Pipeline tests for ``MaskGenerationPipeline`` (SAM-style mask generation)."""

    # NOTE(review): both attributes were bound to the same name `_A`, so the
    # second clobbered the first; the pipeline-test harness expects distinct
    # `model_mapping` / `tf_model_mapping` attributes -- confirm against harness.
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance plus example inputs for the shared harness."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Exercised by the shared pipeline test harness; nothing extra to check.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        """End-to-end SAM mask generation; compares hashed masks and scores."""
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
        )
        # Shortening by hashing (the `new_outupt` spelling is kept from upstream).
        new_outupt = []
        for i, o in enumerate(outputs["masks"]):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        """High IoU threshold should keep only the top-scoring masks."""
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs["masks"]):
            new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_outupt, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
364
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from 
.scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
306
0
def snake_case_ ( lowerCAmelCase_ : list , lowerCAmelCase_ : list , lowerCAmelCase_ : int ): __lowercase : int = len(lowerCAmelCase_ ) __lowercase : Optional[int] = [[0] * n for i in range(lowerCAmelCase_ )] for i in range(lowerCAmelCase_ ): __lowercase : str = y_points[i] for i in range(2 , lowerCAmelCase_ ): for j in range(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : Dict = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
365
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


# Bug fix: these three constants were all bound to one clobbered name while
# the code below reads NON_ALPHA / MIN_NUM_TOKENS / NUM_PERM.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Build a MinHash sketch of a token list; None if the file is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split source text on non-alphanumeric characters into a token set."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    """LSH index that groups near-duplicate files into clusters.

    Bug fix: the class was renamed away from ``DuplicationIndex`` although
    ``make_duplicate_clusters`` below instantiates it by that name.
    """

    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # Maps a cluster's base key -> set of duplicate keys.
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a file and attach it to an existing cluster if one matches."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No existing cluster: start one rooted at the first neighbor.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return clusters as lists of {"base_index", "repo_name", "path"} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    """Worker: ((index, row)) -> ((index, repo, path), MinHash) or None."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, MinHash) pairs for a dataset, hashing in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Index every file of the dataset and return its duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(codea: str, codeb: str) -> float:
    """Jaccard similarity of the two files' token sets."""
    tokensa = get_tokens(codea)
    tokensb = get_tokens(codeb)
    return len(tokensa & tokensb) / len(tokensa | tokensb)


# Shared read-only dataset for pool workers (set by find_extremes).
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its representative ("extreme") elements.

    An element joins ``extremes`` unless it is близко -- i.e. at least
    ``jaccard_threshold`` similar -- to one already kept, in which case that
    representative's ``copies`` counter is incremented.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over all clusters in a process pool."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Remove near-duplicate files from ``dataset``, keeping one extreme per group.

    Returns the filtered dataset plus the annotated duplicate clusters.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not a kept representative.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters with bookkeeping info for inspection
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
306
0
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin

# Pin all sources of nondeterminism so tensor-value assertions are stable.
enable_full_determinism()


# NOTE(review): throughout this file, many assignment targets appear mangled
# (`__lowercase` on the left, while later statements read `batch_size`,
# `model`, `init_dict`, ... ) and callee arguments were replaced by `__a`.
# The comments below describe the apparent intent; confirm against the
# upstream diffusers PriorTransformer test before relying on them.
class lowerCAmelCase ( __a , unittest.TestCase ):
    """Unit tests for the PriorTransformer model (shape, signature, pretrained I/O)."""
    # NOTE(review): `__a` in the base list is undefined here; presumably ModelTesterMixin.
    _A : List[str] = PriorTransformer
    _A : Optional[int] = '''hidden_states'''

    @property
    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Build a small dummy input dict (batch 4, embedding dim 8, 7 embeddings)."""
        __lowercase : Dict = 4            # batch size
        __lowercase : List[str] = 8       # embedding dim
        __lowercase : str = 7             # number of embeddings
        __lowercase : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(__a )
        __lowercase : str = floats_tensor((batch_size, embedding_dim) ).to(__a )
        __lowercase : List[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__a )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def lowerCAmelCase ( self : List[Any] , __a : Optional[int]=0 ) -> Dict:
        """Like the dummy input above, but seeded so values are reproducible."""
        torch.manual_seed(__a )
        __lowercase : str = 4
        __lowercase : str = 8
        __lowercase : Dict = 7
        __lowercase : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(__a )
        __lowercase : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(__a )
        __lowercase : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__a )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def lowerCAmelCase ( self : str ) -> Optional[int]:
        """Expected model input shape (excluding batch)."""
        return (4, 8)

    @property
    def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        """Expected model output shape (excluding batch)."""
        return (4, 8)

    def lowerCAmelCase ( self : str ) -> Tuple:
        """Return (init kwargs, input dict) for constructing a tiny model."""
        __lowercase : int = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 4,
            """num_layers""": 2,
            """embedding_dim""": 8,
            """num_embeddings""": 7,
            """additional_embeddings""": 4,
        }
        __lowercase : List[Any] = self.dummy_input
        return init_dict, inputs_dict

    def lowerCAmelCase ( self : List[str] ) -> Any:
        """Loading from the hub should report no missing keys and run forward."""
        __lowercase : List[str] = PriorTransformer.from_pretrained(
            """hf-internal-testing/prior-dummy""" , output_loading_info=__a )
        self.assertIsNotNone(__a )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(__a )
        __lowercase : Optional[Any] = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"

    def lowerCAmelCase ( self : Any ) -> Dict:
        """forward() must accept (hidden_states, timestep, ...) in that order."""
        __lowercase : Dict = self.prepare_init_args_and_inputs_for_common()
        __lowercase : List[Any] = self.model_class(**__a )
        __lowercase : Tuple = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        __lowercase : List[str] = [*signature.parameters.keys()]
        __lowercase : List[Any] = ["""hidden_states""", """timestep"""]
        self.assertListEqual(arg_names[:2] , __a )

    def lowerCAmelCase ( self : str ) -> Tuple:
        """Seeded forward pass of the pretrained dummy matches a stored slice."""
        __lowercase : Optional[int] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
        __lowercase : List[str] = model.to(__a )
        if hasattr(__a , """set_default_attn_processor""" ):
            model.set_default_attn_processor()
        __lowercase : str = self.get_dummy_seed_input()
        with torch.no_grad():
            __lowercase : int = model(**__a )[0]
        __lowercase : List[str] = output[0, :5].flatten().cpu()
        print(__a )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        __lowercase : Union[str, Any] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
        self.assertTrue(torch_all_close(__a , __a , rtol=1E-2 ) )


@slow
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the Kandinsky 2.1 prior checkpoint."""

    def lowerCAmelCase ( self : Tuple , __a : Union[str, Any]=1 , __a : str=768 , __a : Optional[int]=77 , __a : Union[str, Any]=0 ) -> Optional[Any]:
        """Seeded random inputs sized for the real prior (dim 768, 77 embeddings)."""
        torch.manual_seed(__a )
        __lowercase : Union[str, Any] = batch_size
        __lowercase : Any = embedding_dim
        __lowercase : int = num_embeddings
        __lowercase : List[Any] = torch.randn((batch_size, embedding_dim) ).to(__a )
        __lowercase : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(__a )
        __lowercase : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__a )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
        # Free GPU memory between slow tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ] )
    def lowerCAmelCase ( self : Union[str, Any] , __a : str , __a : str ) -> int:
        """Per-seed output slices of the pretrained prior match stored values."""
        __lowercase : Any = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
        model.to(__a )
        __lowercase : Union[str, Any] = self.get_dummy_seed_input(seed=__a )
        with torch.no_grad():
            __lowercase : int = model(**__a )[0]
        assert list(sample.shape ) == [1, 768]
        __lowercase : Any = sample[0, :8].flatten().cpu()
        print(__a )
        __lowercase : Optional[int] = torch.tensor(__a )
        assert torch_all_close(__a , __a , atol=1E-3 )
366
from ...processing_utils import ProcessorMixin class lowerCAmelCase ( __a ): '''simple docstring''' _A : List[str] = ['''image_processor''', '''feature_extractor'''] _A : List[Any] = '''TvltImageProcessor''' _A : Optional[int] = '''TvltFeatureExtractor''' def __init__( self : str , __a : List[Any] , __a : Tuple ) -> Optional[Any]: """simple docstring""" super().__init__(image_processor=__a , feature_extractor=__a ) __lowercase : Union[str, Any] = image_processor __lowercase : Tuple = feature_extractor def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict: """simple docstring""" if images is None and audio is None: raise ValueError("""You need to specify either an `images` or `audio` input to process.""" ) __lowercase : Tuple = None if images is not None: __lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a ) if images_mixed is not None: __lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a ) if audio is not None: __lowercase : Optional[Any] = self.feature_extractor( __a , *__a , sampling_rate=__a , mask_audio=__a , **__a ) __lowercase : Tuple = {} if audio is not None: output_dict.update(__a ) if images is not None: output_dict.update(__a ) if images_mixed_dict is not None: output_dict.update(__a ) return output_dict @property def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase : int = self.image_processor.model_input_names __lowercase : Union[str, Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
306
0
from math import factorial, pi def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : int = 30 ): if not isinstance(lowerCAmelCase_ , (int, float) ): raise ValueError("""maclaurin_sin() requires either an int or float for theta""" ) if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or accuracy <= 0: raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" ) __lowercase : Optional[int] = float(lowerCAmelCase_ ) __lowercase : Any = theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowerCAmelCase_ ) ) def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : int = 30 ): if not isinstance(lowerCAmelCase_ , (int, float) ): raise ValueError("""maclaurin_cos() requires either an int or float for theta""" ) if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or accuracy <= 0: raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" ) __lowercase : int = float(lowerCAmelCase_ ) __lowercase : Tuple = theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowerCAmelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
367
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowerCAmelCase : '''simple docstring''' def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]: """simple docstring""" __lowercase : Tuple = parent __lowercase : int = batch_size __lowercase : Any = seq_length __lowercase : str = is_training __lowercase : str = use_input_mask __lowercase : Optional[int] = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : Optional[Any] = vocab_size __lowercase : int = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : Dict = num_attention_heads __lowercase : Any = intermediate_size __lowercase : Dict = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : List[str] = max_position_embeddings __lowercase : Union[str, Any] = type_vocab_size __lowercase : Dict = type_sequence_label_size __lowercase : Union[str, Any] = 
initializer_range __lowercase : List[Any] = num_labels __lowercase : str = num_choices __lowercase : Tuple = scope def lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : int = None if self.use_input_mask: __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None __lowercase : Optional[Any] = None __lowercase : Tuple = None if self.use_labels: __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]: """simple docstring""" __lowercase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() __lowercase : str = model(__a , attention_mask=__a ) __lowercase : List[Any] = model(__a ) __lowercase : Optional[int] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase : List[str] = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() __lowercase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase : Tuple = self.num_labels __lowercase : Any = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase : Any = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) : List[str] = config_and_inputs __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase ( __a , __a , unittest.TestCase ): '''simple docstring''' _A : Optional[Any] = False _A : Any = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) _A : Optional[Any] = () _A : List[Any] = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) _A : Optional[Any] = True def 
lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" __lowercase : Optional[int] = EsmModelTester(self ) __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" __lowercase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase : Union[str, Any] = type self.model_tester.create_and_check_model(*__a ) def lowerCAmelCase ( self : int ) -> Any: """simple docstring""" __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def lowerCAmelCase ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : List[str] = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : List[str] = EsmEmbeddings(config=__a ) __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) __lowercase : int = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx ) 
self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def lowerCAmelCase ( self : Tuple ) -> Any: """simple docstring""" __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] __lowercase : Optional[Any] = EsmEmbeddings(config=__a ) __lowercase : Optional[int] = torch.empty(2 , 4 , 30 ) __lowercase : Tuple = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" pass @unittest.skip("""Esm does not support embedding resizing""" ) def lowerCAmelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def lowerCAmelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @require_torch class lowerCAmelCase ( __a ): '''simple docstring''' @slow def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowercase : List[str] = model(__a )[0] __lowercase : Union[str, Any] = 33 __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) __lowercase : List[Any] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) ) 
@slow def lowerCAmelCase ( self : str ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __lowercase : Any = model(__a )[0] # compare the actual values for a slice. __lowercase : int = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
306
0
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """A patience-sort pile: a list whose top card is its last element.

    Piles compare by their top card so ``bisect_left`` can binary-search
    the leftmost pile whose top is >= an incoming element.
    """

    def __lt__(self, other: Stack) -> bool:
        return self[-1] < other[-1]

    def __eq__(self, other: object) -> bool:
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort ``collection`` in place with patience sort and return it.

    Deal phase: each element goes on the leftmost pile whose top is >= the
    element (found with binary search), or starts a new pile.  Merge phase:
    each pile is descending, so the reversed piles are k-way merged with
    ``heapq.merge``.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    >>> patience_sort([])
    []
    """
    stacks: list[Stack] = []
    # Deal every element onto an existing pile or open a new one.
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # Each pile is in descending order; reverse before the k-way merge.
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
368
def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number.

    Inverts P(k) = k(3k - 1) / 2: n is pentagonal iff
    (1 + sqrt(1 + 24n)) / 6 is a positive integer.
    Uses float sqrt, so it is only reliable for moderately sized n.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: pentagonal pair with pentagonal sum and difference.

    Searches pairs among the first ``limit - 1`` pentagonal numbers and
    returns the difference D = P_j - P_i of the first pair whose sum and
    difference are both pentagonal; returns -1 if no such pair is found
    within the limit.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            # Both the sum and the difference must be pentagonal.
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
306
0
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort ``numbers`` in place with exchange sort and return it.

    Compares every pair (i, j), j > i, and swaps whenever the later element
    is smaller, so position i holds its final value after pass i.  O(n^2)
    comparisons -- fine for the small interactive inputs below.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> exchange_sort([])
    []
    """
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                # Tuple assignment swaps the two elements in place.
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
369
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


# NOTE(review): this file was machine-renamed -- the class, its methods, the
# base class (``__a``) and all locals (``__lowercase``) are placeholders, and
# later statements reference the names the original code used (``config``,
# ``scheduler``, ``sample``, ``result_sum``...), which no longer resolve.
# Code tokens are left untouched; comments document the visible intent.
@require_torchsde
class lowerCAmelCase ( __a ):
    """Tests for ``DPMSolverSDEScheduler`` (base class is presumably the
    obfuscated ``SchedulerCommonTest`` imported above -- TODO confirm)."""

    # Scheduler classes under test, and the number of inference steps used by
    # the full-loop tests below.
    _A : Optional[Any] = (DPMSolverSDEScheduler,)
    _A : Dict = 10

    def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]:
        """Build the default scheduler config, then apply keyword overrides."""
        __lowercase : Any = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        # NOTE(review): the dict above is bound to ``__lowercase`` but updated
        # and returned as ``config`` -- renaming artifact; the original bound
        # it to ``config``.
        config.update(**__a )
        return config

    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Exercise several ``num_train_timesteps`` values."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__a )

    def lowerCAmelCase ( self : Any ) -> Optional[int]:
        """Exercise paired beta_start / beta_end values."""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__a , beta_end=__a )

    def lowerCAmelCase ( self : str ) -> Optional[Any]:
        """Exercise both supported beta schedules."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__a )

    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """Exercise both prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Full denoising loop with the default (epsilon) config; checks
        sum/mean of the final sample against per-device reference values."""
        __lowercase : Optional[int] = self.scheduler_classes[0]
        __lowercase : List[str] = self.get_scheduler_config()
        __lowercase : Any = scheduler_class(**__a )

        scheduler.set_timesteps(self.num_inference_steps )

        __lowercase : Optional[Any] = self.dummy_model()
        __lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowercase : Optional[Any] = sample.to(__a )

        for i, t in enumerate(scheduler.timesteps ):
            __lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a )

            __lowercase : Optional[Any] = model(__a , __a )

            __lowercase : Optional[Any] = scheduler.step(__a , __a , __a )
            __lowercase : str = output.prev_sample

        __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) )

        # Reference values differ per backend (mps / cuda / cpu).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3

    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Full denoising loop with ``prediction_type='v_prediction'``."""
        __lowercase : Tuple = self.scheduler_classes[0]
        __lowercase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
        __lowercase : int = scheduler_class(**__a )

        scheduler.set_timesteps(self.num_inference_steps )

        __lowercase : Optional[int] = self.dummy_model()
        __lowercase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowercase : Dict = sample.to(__a )

        for i, t in enumerate(scheduler.timesteps ):
            __lowercase : Dict = scheduler.scale_model_input(__a , __a )

            __lowercase : Optional[int] = model(__a , __a )

            __lowercase : Optional[int] = scheduler.step(__a , __a , __a )
            __lowercase : int = output.prev_sample

        __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : List[str] = torch.mean(torch.abs(__a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3

    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Full denoising loop with timesteps placed on the target device."""
        __lowercase : Tuple = self.scheduler_classes[0]
        __lowercase : Dict = self.get_scheduler_config()
        __lowercase : Optional[int] = scheduler_class(**__a )

        scheduler.set_timesteps(self.num_inference_steps , device=__a )

        __lowercase : int = self.dummy_model()
        __lowercase : Optional[Any] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            __lowercase : int = scheduler.scale_model_input(__a , __a )

            __lowercase : List[str] = model(__a , __a )

            __lowercase : List[str] = scheduler.step(__a , __a , __a )
            __lowercase : int = output.prev_sample

        __lowercase : List[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3

    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Full denoising loop with Karras sigmas enabled."""
        __lowercase : str = self.scheduler_classes[0]
        __lowercase : List[Any] = self.get_scheduler_config()
        __lowercase : Tuple = scheduler_class(**__a , use_karras_sigmas=__a )

        scheduler.set_timesteps(self.num_inference_steps , device=__a )

        __lowercase : List[str] = self.dummy_model()
        __lowercase : Optional[int] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
        __lowercase : str = sample.to(__a )

        for t in scheduler.timesteps:
            __lowercase : List[Any] = scheduler.scale_model_input(__a , __a )

            __lowercase : Optional[Any] = model(__a , __a )

            __lowercase : Any = scheduler.step(__a , __a , __a )
            __lowercase : Optional[Any] = output.prev_sample

        __lowercase : Any = torch.sum(torch.abs(__a ) )
        __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
306
0
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of non-negative ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction of e.

    e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third partial quotient
    (i % 3 == 0) is 2*i/3, all others are 1.  Numerators follow
    h(i) = a(i) * h(i-1) + h(i-2), seeded with h(0) = 1, h(1) = 2.

    >>> solution(9)
    13
    >>> solution(10)
    17
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # Partial quotient of e's continued fraction at position i.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
370
# NOTE(review): this script was machine-renamed -- every binding is called
# ``lowerCamelCase``/``__lowercase``/``snake_case_`` while later statements
# reference the original names (``args``, ``parser``, ``engine_name``,
# ``absl_logger``, ``args.fpaa`` vs the declared ``--fp16`` flag, ...), and
# several ``def``s repeat the parameter name ``lowerCAmelCase_``, which is a
# SyntaxError.  It is not runnable as-is; code tokens are left untouched and
# comments record the visible intent: evaluate an ONNX-exported BERT QA model
# with TensorRT on a SQuAD-style dataset.
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


# TensorRT logger and absl/python logging setup.
lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)

# Command-line interface.
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    '''--onnx_model_path''',
    default=None,
    type=str,
    required=True,
    help='''Path to ONNX model: ''',
)
parser.add_argument(
    '''--output_dir''',
    default=None,
    type=str,
    required=True,
    help='''The output directory where the model checkpoints and predictions will be written.''',
)

# Other parameters
parser.add_argument(
    '''--tokenizer_name''',
    default='''''',
    type=str,
    required=True,
    help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
    '''--version_2_with_negative''',
    action='''store_true''',
    help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
    '''--null_score_diff_threshold''',
    type=float,
    default=0.0,
    help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
    '''--max_seq_length''',
    default=3_84,
    type=int,
    help=(
        '''The maximum total input sequence length after WordPiece tokenization. Sequences '''
        '''longer than this will be truncated, and sequences shorter than this will be padded.'''
    ),
)
parser.add_argument(
    '''--doc_stride''',
    default=1_28,
    type=int,
    help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
    '''--n_best_size''',
    default=20,
    type=int,
    help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
    '''--max_answer_length''',
    default=30,
    type=int,
    help=(
        '''The maximum length of an answer that can be generated. This is needed because the start '''
        '''and end predictions are not conditioned on one another.'''
    ),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
    '''--dataset_name''',
    type=str,
    default=None,
    required=True,
    help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--dataset_config_name''',
    type=str,
    default=None,
    help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
    '''--fp16''',
    action='''store_true''',
    help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
    '''--int8''',
    action='''store_true''',
    help='''Whether to use INT8''',
)
lowerCamelCase : Dict = parser.parse_args()

if args.tokenizer_name:
    lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
    )

logger.info('''Training/evaluation parameters %s''', args)

lowerCamelCase : List[str] = args.per_device_eval_batch_size
lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
    lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
    lowerCamelCase : int = '''temp_engine/bert-int8.engine'''

# import ONNX file
if not os.path.exists('''temp_engine'''):
    os.makedirs('''temp_engine''')

lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# Parse the ONNX graph into a TensorRT network, then build and serialize the
# engine with the requested precision flags.
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, '''rb''') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
    lowerCamelCase : Dict = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        lowerCamelCase : List[str] = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        lowerCamelCase : Optional[int] = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        lowerCamelCase : Optional[Any] = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, '''wb''') as f:
            f.write(engine.serialize())


def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
    """Run one TensorRT inference: copy a batch to the GPU, execute, copy the
    start/end logits back, and return (outputs, wall-clock inference time)."""
    __lowercase : List[str] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
    __lowercase : Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
    __lowercase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCAmelCase_ )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCAmelCase_ )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCAmelCase_ )
    # start time
    __lowercase : Optional[Any] = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(lowerCAmelCase_ ) for d_inp in d_inputs] + [int(lowerCAmelCase_ ), int(lowerCAmelCase_ )] , stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    __lowercase : int = time.time()
    __lowercase : Union[str, Any] = end_time - start_time
    __lowercase : Any = (h_outputa, h_outputa)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.

lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names

lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Dict = tokenizer.padding_side == '''right'''

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )

lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)


def snake_case_ ( lowerCAmelCase_ : int ):
    """Tokenize validation examples into overlapping features, recording the
    source example id and the (context-only) offset mapping per feature."""
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    __lowercase : str = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    __lowercase : List[str] = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] ,
        examples[context_column_name if pad_on_right else question_column_name] ,
        truncation="""only_second""" if pad_on_right else """only_first""" ,
        max_length=lowerCAmelCase_ ,
        stride=args.doc_stride ,
        return_overflowing_tokens=lowerCAmelCase_ ,
        return_offsets_mapping=lowerCAmelCase_ ,
        padding="""max_length""" ,
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    __lowercase : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" )

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    __lowercase : Any = []

    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        __lowercase : Dict = tokenized_examples.sequence_ids(lowerCAmelCase_ )
        __lowercase : List[Any] = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        __lowercase : List[str] = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        __lowercase : Dict = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]

    return tokenized_examples


lowerCamelCase : Tuple = raw_datasets['''validation''']
# Validation Feature Creation
lowerCamelCase : Optional[int] = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)

lowerCamelCase : Union[str, Any] = default_data_collator

lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase : List[str] = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict="eval" ):
    """Convert raw start/end logits into answer texts and wrap them in the
    ``EvalPrediction`` structure expected by the squad metric."""
    # Post-processing: we match the start logits and end logits to answers in the original context.
    __lowercase : int = postprocess_qa_predictions(
        examples=lowerCAmelCase_ ,
        features=lowerCAmelCase_ ,
        predictions=lowerCAmelCase_ ,
        version_2_with_negative=args.version_2_with_negative ,
        n_best_size=args.n_best_size ,
        max_answer_length=args.max_answer_length ,
        null_score_diff_threshold=args.null_score_diff_threshold ,
        output_dir=args.output_dir ,
        prefix=lowerCAmelCase_ ,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        __lowercase : Optional[int] = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        __lowercase : List[Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]

    __lowercase : Optional[int] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=lowerCAmelCase_ , label_ids=lowerCAmelCase_ )


lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')

# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def snake_case_ ( lowerCAmelCase_ : str ):
        """Byte size of a binding: volume of its shape times dtype item size."""
        return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize

    # Allocate device memory for inputs and outputs.
    lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes)
    lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    lowerCamelCase : Optional[int] = cuda.Stream()

    # Evaluation
    logger.info('''***** Running Evaluation *****''')
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')

    lowerCamelCase : int = 0.0
    lowerCamelCase : List[str] = 0
    lowerCamelCase : List[str] = timeit.default_timer()

    lowerCamelCase : List[Any] = None
    for step, batch in enumerate(eval_dataloader):
        lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1

        lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs
        lowerCamelCase : Optional[Any] = torch.tensor(start_logits)
        lowerCamelCase : List[str] = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)

        lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)

    if all_preds is not None:
        lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset))

    lowerCamelCase : Dict = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
    logger.info('''Total Number of Inference = %d''', niter)

lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds)

lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
306
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @property def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __lowercase : int = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase : Optional[Any] = self.dummy_uncond_unet __lowercase : Any = PNDMScheduler() __lowercase : Dict = PNDMPipeline(unet=__a , scheduler=__a ) pndm.to(__a ) pndm.set_progress_bar_config(disable=__a ) __lowercase : List[str] = torch.manual_seed(0 ) __lowercase : List[str] = pndm(generator=__a , num_inference_steps=20 , output_type="""numpy""" ).images __lowercase : Tuple = torch.manual_seed(0 ) __lowercase : Union[str, Any] = pndm(generator=__a , num_inference_steps=20 , output_type="""numpy""" , return_dict=__a )[0] __lowercase : Any = image[0, -3:, -3:, -1] __lowercase : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowercase : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase : List[str] = """google/ddpm-cifar10-32""" __lowercase : str = UNetaDModel.from_pretrained(__a ) __lowercase : int = 
PNDMScheduler() __lowercase : Optional[int] = PNDMPipeline(unet=__a , scheduler=__a ) pndm.to(__a ) pndm.set_progress_bar_config(disable=__a ) __lowercase : Optional[int] = torch.manual_seed(0 ) __lowercase : List[str] = pndm(generator=__a , output_type="""numpy""" ).images __lowercase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowercase : int = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
371
# Configuration class for the NLLB-MoE model ("nllb-moe" model type).
# NOTE(review): identifiers were machine-mangled — the base class ``__a`` is
# undefined (presumably PretrainedConfig), every ``__init__`` parameter was
# renamed to the duplicate ``__a`` (a SyntaxError), and the three ``_A`` class
# attributes shadow each other. The intended parameter names survive on the
# right-hand side of the assignments below; confirm against the original file.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
lowerCamelCase : str = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}


class lowerCAmelCase ( __a ):
    '''Model configuration holding architecture and Mixture-of-Experts routing
    hyper-parameters for NLLB-MoE.'''

    _A : int = '''nllb-moe'''
    _A : List[str] = ['''past_key_values''']
    _A : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self : Dict , __a : List[str]=128112 , __a : List[Any]=1024 , __a : List[Any]=12 , __a : Union[str, Any]=4096 , __a : List[str]=16 , __a : int=12 , __a : Optional[int]=4096 , __a : str=16 , __a : List[Any]=0.05 , __a : Any=0.05 , __a : Dict=True , __a : Optional[Any]=True , __a : List[Any]="relu" , __a : Tuple=1024 , __a : Optional[Any]=0.1 , __a : Tuple=0.1 , __a : Any=0.0 , __a : Optional[Any]=0.02 , __a : List[str]=2 , __a : Union[str, Any]=True , __a : List[Any]=False , __a : Tuple="float32" , __a : Optional[int]=False , __a : Optional[int]=128 , __a : str=64 , __a : Dict=4 , __a : str=4 , __a : List[str]=0.001 , __a : List[Any]=0.001 , __a : Optional[Any]="all" , __a : Optional[int]=False , __a : int=False , __a : int=1.0 , __a : Dict=0.2 , __a : Tuple=1 , __a : Optional[Any]=0 , __a : List[Any]=2 , __a : Any=False , **__a : Any , ) -> Any:
        """Store model-size, dropout, and MoE routing hyper-parameters, then
        delegate special-token / encoder-decoder flags to the superclass."""
        # --- transformer architecture sizes ---
        __lowercase : int = vocab_size
        __lowercase : List[Any] = max_position_embeddings
        __lowercase : Tuple = d_model
        __lowercase : str = encoder_ffn_dim
        __lowercase : List[str] = encoder_layers
        __lowercase : int = encoder_attention_heads
        __lowercase : List[Any] = decoder_ffn_dim
        __lowercase : int = decoder_layers
        __lowercase : Optional[int] = decoder_attention_heads
        # --- regularisation ---
        __lowercase : Union[str, Any] = dropout
        __lowercase : str = attention_dropout
        __lowercase : Any = activation_dropout
        __lowercase : List[Any] = activation_function
        __lowercase : List[str] = init_std
        __lowercase : Optional[int] = encoder_layerdrop
        __lowercase : str = decoder_layerdrop
        __lowercase : Dict = use_cache
        __lowercase : Optional[Any] = encoder_layers
        __lowercase : str = scale_embedding  # scale factor will be sqrt(d_model) if True
        # --- Mixture-of-Experts routing ---
        __lowercase : List[Any] = router_z_loss_coef
        __lowercase : Tuple = router_aux_loss_coef
        __lowercase : str = decoder_sparse_step
        __lowercase : Any = encoder_sparse_step
        __lowercase : str = num_experts
        __lowercase : List[Any] = expert_capacity
        __lowercase : int = router_bias
        # Router computations may run in a different dtype for numerical stability.
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        __lowercase : Optional[int] = router_dtype
        __lowercase : Any = router_ignore_padding_tokens
        __lowercase : Optional[Any] = batch_prioritized_routing
        __lowercase : str = second_expert_policy
        __lowercase : List[str] = normalize_router_prob_before_dropping
        __lowercase : List[Any] = moe_eval_capacity_token_fraction
        __lowercase : List[str] = moe_token_dropout
        __lowercase : Optional[Any] = output_router_logits
        super().__init__(
            pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , **__a , )
306
0
# pytorch_lightning callbacks for seq2seq (RAG) training: checkpointing,
# early stopping, and metric/generation logging.
# NOTE(review): identifiers were machine-mangled — several methods declare the
# duplicate parameter name ``a`` (a SyntaxError) and read names that are never
# bound (``p``, ``model``, ``metric``, ``trainer``, ``pl_module``, ``od``, ...).
# Confirm against the pre-mangling source.
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
    """Count trainable parameters of a model (sum of sizes of grads-enabled params)."""
    lowerCAmelCase__ : Any = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() )
    lowerCAmelCase__ : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] )
    return params


lowerCamelCase__ = logging.getLogger(__name__)


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
    """Build a ModelCheckpoint callback whose filename pattern depends on the metric
    (only rouge2 / bleu / em are supported); keeps the top 3 checkpoints by val metric."""
    if metric == "rouge2":
        lowerCAmelCase__ : int = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        lowerCAmelCase__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        lowerCAmelCase__ : Optional[int] = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ' function.'
        )
    lowerCAmelCase__ : int = ModelCheckpoint(
        dirpath=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , monitor=F'''val_{metric}''' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
    """Build an EarlyStopping callback; minimises loss-like metrics, maximises others."""
    return EarlyStopping(
        monitor=F'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , )


class A__ ( pl.Callback ):
    '''Logging callback: pushes learning rates, metrics, generations, and parameter
    counts to the Lightning logger / result files.'''

    def _lowerCamelCase ( self : Any , a : Tuple , a : Any ):
        '''Log the learning rate of each optimizer param group as lr_group_<i>.'''
        lowerCAmelCase__ : List[Any] = {f'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(a )

    @rank_zero_only
    def _lowerCamelCase ( self : Tuple , a : pl.Trainer , a : pl.LightningModule , a : str , a : List[Any]=True ):
        '''Write callback metrics (and optionally generations) for a val/test phase
        to files under the module's output_dir; runs only on rank zero.'''
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        lowerCAmelCase__ : Dict = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        lowerCAmelCase__ : List[Any] = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            lowerCAmelCase__ : Dict = od / 'test_results.txt'
            lowerCAmelCase__ : str = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            lowerCAmelCase__ : int = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            lowerCAmelCase__ : List[Any] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=a )
        generations_file.parent.mkdir(exist_ok=a )
        with open(a , 'a+' ) as writer:
            for key in sorted(a ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                lowerCAmelCase__ : List[Any] = metrics[key]
                if isinstance(a , torch.Tensor ):
                    # Scalars come out as 0-d tensors; unwrap for formatting.
                    lowerCAmelCase__ : Optional[Any] = val.item()
                lowerCAmelCase__ : List[str] = f'''{key}: {val:.6f}\n'''
                writer.write(a )
        if not save_generations:
            return
        if "preds" in metrics:
            lowerCAmelCase__ : Tuple = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(a )

    @rank_zero_only
    def _lowerCamelCase ( self : int , a : Optional[int] , a : Optional[int] ):
        '''Log total and trainable parameter counts (in millions) at train start.'''
        try:
            lowerCAmelCase__ : List[Any] = pl_module.model.model.num_parameters()
        except AttributeError:
            # Fall back when the module does not wrap an inner .model.
            lowerCAmelCase__ : str = pl_module.model.num_parameters()
        lowerCAmelCase__ : List[str] = count_trainable_parameters(a )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )

    @rank_zero_only
    def _lowerCamelCase ( self : List[Any] , a : pl.Trainer , a : pl.LightningModule ):
        '''On test end: persist metrics to metrics_save_path and write the test logs.'''
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(a , a , 'test' )

    @rank_zero_only
    def _lowerCamelCase ( self : List[Any] , a : pl.Trainer , a : List[Any] ):
        '''On validation end: persist metrics only (generation logging disabled).'''
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
307
# Projected adaptive log-softmax (transfo-xl style): the vocabulary is split
# into a frequent "head" shortlist plus tail clusters; tail words are scored
# through cluster logits, which makes softmax over huge vocabularies cheaper.
# NOTE(review): identifiers were machine-mangled — ``__init__`` and the methods
# declare the duplicate parameter name ``a`` (a SyntaxError) and read names
# (``n_token``, ``cutoffs``, ``hidden``, ``labels``, ``proj``, ...) that were
# obfuscated out of the signatures. Confirm against the pre-mangling source.
import torch
from torch import nn


class A__ ( nn.Module ):
    def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
        '''Set up per-cluster output layers/projections; cutoffs partition the
        vocabulary, div_val shrinks the embedding dim for rarer clusters.'''
        super().__init__()
        lowerCAmelCase__ : Dict = n_token
        lowerCAmelCase__ : Any = d_embed
        lowerCAmelCase__ : str = d_proj
        # Append n_token so the last cluster covers the remaining vocabulary.
        lowerCAmelCase__ : int = cutoffs + [n_token]
        lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
        lowerCAmelCase__ : str = div_val
        lowerCAmelCase__ : Tuple = self.cutoffs[0]
        lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
        # Head output = shortlist words + one pseudo-token per tail cluster.
        lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
        lowerCAmelCase__ : Optional[int] = nn.ModuleList()
        lowerCAmelCase__ : Tuple = nn.ParameterList()
        if div_val == 1:
            # Single embedding size; projection needed only when d_proj != d_embed.
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
                else:
                    self.out_projs.append(a )
                self.out_layers.append(nn.Linear(a , a ) )
        else:
            # Embedding dim shrinks by div_val**i for each later (rarer) cluster.
            for i in range(len(self.cutoffs ) ):
                lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
                self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
        lowerCAmelCase__ : Tuple = keep_order

    def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
        '''Compute output logits, optionally projecting hidden states through
        ``proj`` first (two chained linear ops instead of one einsum).'''
        if proj is None:
            lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
            lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit

    def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
        '''Forward pass: with labels, returns negative log-likelihood per token
        (labels shifted so tokens < n predict n, -100 masked out); without
        labels, returns full log-probabilities over the vocabulary.'''
        if labels is not None:
            # Shift so that tokens < n predict n
            lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
            lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
            lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
            lowerCAmelCase__ : Tuple = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            # Degenerate case: plain (projected) softmax over the whole vocab.
            lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                lowerCAmelCase__ : str = labels != -100
                lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
                lowerCAmelCase__ : List[str] = (
                    -nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
        else:
            # construct weights and biases
            lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    # Slices of a single shared output layer per cluster.
                    lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
                    lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
                    lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
                if i == 0:
                    # Head also scores one pseudo-token per tail cluster.
                    lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(a )
                biases.append(a )
            lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
            lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
            lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
            if labels is None:
                lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
            lowerCAmelCase__ : Tuple = 0
            lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
            for i in range(len(a ) - 1 ):
                lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    # Only process tokens whose label falls in this cluster.
                    lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
                    lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
                    lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
                    lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
                else:
                    lowerCAmelCase__ : Any = hidden
                if i == 0:
                    if labels is not None:
                        lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
                else:
                    lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
                    lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
                    lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
                    # Tail word logprob = cluster pseudo-token logprob + in-cluster logprob.
                    lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        lowerCAmelCase__ : Union[str, Any] = logprob_i
                if labels is not None:
                    # Scatter back to original token order (or copy sequentially).
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , a , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out

    def _lowerCamelCase ( self : List[Any] , a : Any ):
        '''Return full log-probabilities over the vocabulary (no labels path).'''
        if self.n_clusters == 0:
            lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(a , dim=-1 )
        else:
            # construct weights and biases
            lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
                    lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    lowerCAmelCase__ : int = self.out_layers[i].weight
                    lowerCAmelCase__ : int = self.out_layers[i].bias
                if i == 0:
                    lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(a )
                biases.append(a )
            lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
            lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
            lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
            lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
            for i in range(len(a ) - 1 ):
                lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
                else:
                    lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
                    lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
                    lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
                    lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
                    lowerCAmelCase__ : List[str] = logprob_i
            return out
307
1
# Deduplicate identical initializer tensors in an ONNX model: equal tensors are
# merged into one, all node inputs are rewired to the surviving name, and the
# optimized model is saved next to the original as 'optimized_<name>'.
# NOTE(review): identifiers were machine-mangled; several names below
# (``a``, ``b``, ``name``, ``new_name``, ``node_proto``, ``graph_proto``, ...)
# were obfuscated out of the signatures. Confirm against the original file.
import os

import numpy
import onnx


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
    """Compare two TensorProtos for equality ignoring their names.

    Names are temporarily blanked so protobuf equality covers only the data,
    then restored before returning."""
    lowerCAmelCase__ : Dict = a.name
    lowerCAmelCase__ : Optional[int] = b.name
    lowerCAmelCase__ : List[Any] = ''
    lowerCAmelCase__ : int = ''
    lowerCAmelCase__ : int = a == b
    lowerCAmelCase__ : Any = name_a
    lowerCAmelCase__ : Union[str, Any] = name_b
    return res


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
    """Replace an input name on a node in place (insert new, pop old), and recurse
    into the subgraphs of If (both branches) and Loop control-flow nodes."""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        _graph_replace_input_with(node_proto.attribute[1].g , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
    """Apply the input-name replacement to every node of a graph."""
    for n in graph_proto.node:
        _node_replace_input_with(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
    """Drop duplicate initializers from the model copy and rewire all consumers of
    the removed name onto the kept (reference) initializer."""
    lowerCAmelCase__ : Any = list(model.graph.initializer )
    lowerCAmelCase__ : str = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        # Sanity: both models list initializers in the same order, and the
        # duplicate always comes after the one it is replaced with.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        lowerCAmelCase__ : List[str] = inits[i].name
        lowerCAmelCase__ : Optional[int] = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
    """Load a model file, find all pairwise-equal initializers, report the memory
    saved, write the deduplicated model, and return it."""
    lowerCAmelCase__ : List[str] = os.path.dirname(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ : int = os.path.basename(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ : Any = onnx.load(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
    lowerCAmelCase__ : int = list(model.graph.initializer )
    lowerCAmelCase__ : Any = set()
    lowerCAmelCase__ : List[str] = {}
    lowerCAmelCase__ : List[str] = []
    lowerCAmelCase__ : Tuple = 0
    # O(n^2) pairwise comparison of initializer tensors.
    for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(SCREAMING_SNAKE_CASE_ ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(SCREAMING_SNAKE_CASE_ )
                dup_set.add(SCREAMING_SNAKE_CASE_ )
                # Estimate bytes saved from the duplicate's dtype and shape.
                lowerCAmelCase__ : int = inits[j].data_type
                lowerCAmelCase__ : List[Any] = numpy.prod(inits[j].dims )
                if dtype == 1:
                    # float32
                    mem_size *= 4
                elif dtype == 6:
                    # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    # int64 / double
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , SCREAMING_SNAKE_CASE_ )
                total_reduced_size += mem_size
                lowerCAmelCase__ : Optional[Any] = inits[i].name
                lowerCAmelCase__ : Tuple = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(SCREAMING_SNAKE_CASE_ )
                else:
                    lowerCAmelCase__ : Union[str, Any] = [name_j]
                # Replace the later duplicate (j) with the earlier reference (i).
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1_024 / 1_024 / 1_024 , 'GB' )
    lowerCAmelCase__ : Optional[Any] = sorted(SCREAMING_SNAKE_CASE_ )
    _remove_dup_initializers_from_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ : Optional[Any] = 'optimized_' + model_file_name
    lowerCAmelCase__ : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    onnx.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    return new_model
307
# CI log-report script: parses pytest-reportlog *.log files, builds a failure
# summary table, and posts it (with per-file threads) to a Slack channel.
# NOTE(review): identifiers were machine-mangled — most bindings go to the same
# name ``lowerCamelCase__`` while later reads use the original names
# (``failed``, ``message``, ``payload``, ``ts``, ...); confirm against the
# pre-mangling source before running.
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

# Minimal Slack-markdown-friendly table style: pipe-separated rows, no rules.
lowerCamelCase__ = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("""""", """|""", """|"""),
    datarow=DataRow("""""", """|""", """|"""),
    padding=1,
    with_header_hide=None,
)

lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
    {
        """type""": """header""",
        """text""": {
            """type""": """plain_text""",
            """text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
            """emoji""": True,
        },
    }
]
lowerCamelCase__ = 0
# Each *.log is a pytest-reportlog file: one JSON object per line.
for log in Path().glob("""*.log"""):
    lowerCamelCase__ = 0
    with open(log, """r""") as f:
        for line in f:
            lowerCamelCase__ = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                lowerCamelCase__ = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    lowerCamelCase__ = F"""{line["duration"]:.4f}"""
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    lowerCamelCase__ = []
    # Logs are consumed once, then deleted.
    log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += F"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += F"*{name[1:]}: {num_failed} failed tests*\n"
            lowerCamelCase__ = []
            lowerCamelCase__ = {}
            for test in failed_tests:
                # nodeid looks like path/to/file.py::Class::test_name
                lowerCamelCase__ = test[0].split("""::""")
                lowerCamelCase__ = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    lowerCamelCase__ = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            lowerCamelCase__ = [test[0] for test in failed_table]
            lowerCamelCase__ = list(set(files))
            # Count number of instances in failed_tests
            lowerCamelCase__ = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            lowerCamelCase__ = tabulate(
                table,
                headers=["""Test Location""", """Num Failed"""],
                tablefmt=hf_table_format,
                stralign="""right""",
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        # Slack section blocks cap at ~3000 chars; truncate with a notice.
        lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
        lowerCamelCase__ = len(err) + 10
        lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
    print(F"""### {message}""")
else:
    lowerCamelCase__ = """No failed tests! 🤗"""
    print(F"""## {message}""")
    payload.append(no_error_payload)
# Only post to Slack when running inside CI (TEST_TYPE set).
if os.environ.get("""TEST_TYPE""", """""") != "":
    from slack_sdk import WebClient

    lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        lowerCamelCase__ = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": message,
            },
        }
        payload.append(md_report)
        lowerCamelCase__ = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": """*For more details:*""",
            },
            """accessory""": {
                """type""": """button""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """Check Action results""",
                    """emoji""": True,
                },
                """url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
            },
        }
        payload.append(action_button)
        lowerCamelCase__ = {
            """type""": """context""",
            """elements""": [
                {
                    """type""": """plain_text""",
                    """text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
                }
            ],
        }
        payload.append(date_report)
        lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        lowerCamelCase__ = response.data["""ts"""]
        # Post per-file failure tables as threaded replies under the summary.
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                lowerCamelCase__ = """"""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        lowerCamelCase__ = row[0]
                    else:
                        lowerCamelCase__ = """"""
                lowerCamelCase__ = {
                    """type""": """section""",
                    """text""": {
                        """type""": """mrkdwn""",
                        """text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
                    },
                }
                client.chat_postMessage(
                    channel="""#accelerate-ci-daily""",
                    thread_ts=ts,
                    blocks=[payload],
                )
307
1
from math import factorial

# Factorial of each decimal digit keyed by the digit character, so each digit
# costs a dict lookup instead of a factorial computation.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the decimal digits of ``number``.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError('Parameter number must be int')
    if number < 0:
        raise ValueError('Parameter number must be greater than or equal to 0')
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Project Euler 74: count starting numbers below ``number_limit`` whose
    digit-factorial chain contains exactly ``chain_length`` non-repeating terms.

    Previously-seen chain lengths are cached so each chain is only walked once.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('Parameters chain_length and number_limit must be int')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            'Parameters chain_length and number_limit must be greater than 0'
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Extend with the cached tail length when we stopped on a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
307
# Smoke tests for the digital_image_processing package, all driven by the
# shared Lena sample images.
# NOTE(review): identifiers were machine-mangled — ``SCREAMING_SNAKE_CASE_``
# and several local reads (``negative_img``, ``resp``, ``img``, ``res``, ...)
# are undefined in the scopes that use them; confirm against the original.
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: the small Lena image in BGR and grayscale.
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)


def lowerCAmelCase__ ( ) -> Dict:
    """Smoke-test convert_to_negative: result has at least one truthy pixel."""
    lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
    # assert negative_img array for at least one True
    assert negative_img.any()


def lowerCAmelCase__ ( ) -> Optional[Any]:
    """Smoke-test change_contrast via the PIL image repr string."""
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )


def lowerCAmelCase__ ( ) -> Tuple:
    """Smoke-test the Gaussian kernel generator used by the canny detector."""
    lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def lowerCAmelCase__ ( ) -> Tuple:
    """Smoke-test canny edge detection on the grayscale Lena image."""
    lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
    # assert canny array for at least one True
    assert canny_array.any()


def lowerCAmelCase__ ( ) -> Optional[int]:
    """Smoke-test the 5x5 Gaussian filter."""
    assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()


def lowerCAmelCase__ ( ) -> Dict:
    """Smoke-test image convolution with a diagonal Laplace kernel."""
    # laplace diagonals
    lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
    assert res.any()


def lowerCAmelCase__ ( ) -> List[str]:
    """Smoke-test the 3x3 median filter."""
    assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()


def lowerCAmelCase__ ( ) -> Any:
    """Smoke-test the Sobel filter: both gradient and angle must be non-trivial."""
    lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
    assert grad.any() and theta.any()


def lowerCAmelCase__ ( ) -> Any:
    """Smoke-test the sepia conversion."""
    lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
    assert sepia.all()


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
    """Smoke-test Burkes dithering end to end."""
    lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
    """Smoke-test nearest-neighbour resize to 400x200."""
    lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()


def lowerCAmelCase__ ( ) -> int:
    """Smoke-test local binary pattern: neighbour lookup plus full LBP image."""
    lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
    # Test for get_neighbors_pixel function() return not None
    lowerCAmelCase__ : str = 0
    lowerCAmelCase__ : str = 0
    lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
    lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    assert lbp_image.any()
307
1
def solution() -> int:
    """Project Euler 9: return the product a*b*c of the unique Pythagorean
    triplet (a < b < c) with a + b + c == 1000.

    Fixes the mangled original, where the inner range start was an undefined
    name and the function name did not match the ``solution()`` call in
    ``__main__``. Uses ``next`` so the search stops at the first (only) hit.
    """
    return next(
        a * b * (1_000 - a - b)
        # b starts at a so each unordered pair is tried once (a <= b).
        for a in range(1, 999)
        for b in range(a, 999)
        if a * a + b * b == (1_000 - a - b) ** 2
    )


if __name__ == "__main__":
    print(F"""{solution() = }""")
307
# Lazy-import scaffolding for the BLIP model package: the import structure maps
# submodule name -> list of public names, and a _LazyModule replaces this
# module in sys.modules so heavy deps (torch/tf/vision) load on first access.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: the original rebound the whole structure name to a bare list
    # here (and in the branches below), clobbering the dict; optional entries
    # must be *added* as keys instead.
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # BUG FIX: the lazy module must be installed into sys.modules (the original
    # assigned it to a throwaway name and referenced an undefined
    # `_import_structure`, so lazy attribute access could never work).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
307
1
# Unit tests for transformers' AutoConfig factory: hub loading, local loading,
# custom-config registration, error messages, and trusted remote code.
# NOTE(review): identifiers in this chunk were machine-renamed; methods bind
# results to `lowerCAmelCase__` but then pass the free name `a` (and read
# `config`, `reloaded_config`, `tmp_dir`), which is unresolved in this view.
# TODO: confirm the intended arguments against the original test file before
# treating any individual assertion as authoritative.
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

# Make the custom-configuration fixture module importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402

# Path to a minimal config fixture used by the local-loading tests.
lowerCamelCase__ = get_tests_dir("""fixtures/dummy-config.json""")


class A__ ( unittest.TestCase ):
    def _lowerCamelCase ( self : Tuple ):
        '''Reset per-test state (corrupted setUp; original target name lost).'''
        lowerCAmelCase__ : int = 0

    def _lowerCamelCase ( self : Any ):
        '''The auto module must be importable and discoverable via importlib.'''
        self.assertIsNotNone(transformers.models.auto.__spec__ )
        self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Loading a hub id ("bert-base-uncased") yields a config instance.'''
        lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained('bert-base-uncased' )
        self.assertIsInstance(a , a )

    def _lowerCamelCase ( self : List[str] ):
        '''Loading from a local identifier yields a config instance.'''
        lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained(a )
        self.assertIsInstance(a , a )

    def _lowerCamelCase ( self : Dict ):
        '''Loading from a local config file yields a config instance.'''
        lowerCAmelCase__ : List[Any] = AutoConfig.from_pretrained(a )
        self.assertIsInstance(a , a )

    def _lowerCamelCase ( self : Any ):
        '''AutoConfig.for_model maps a model type string to its config class.'''
        lowerCAmelCase__ : str = AutoConfig.for_model('roberta' )
        self.assertIsInstance(a , a )

    def _lowerCamelCase ( self : Dict ):
        '''A folder name containing several model-type substrings resolves deterministically.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            lowerCAmelCase__ : Any = os.path.join(a , 'fake-roberta' )
            os.makedirs(a , exist_ok=a )
            with open(os.path.join(a , 'config.json' ) , 'w' ) as f:
                f.write(json.dumps({} ) )
            lowerCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained(a )
            self.assertEqual(type(a ) , a )

    def _lowerCamelCase ( self : Optional[int] ):
        '''Registering a custom config: duplicates and mismatched types must raise.'''
        try:
            AutoConfig.register('custom' , a )
            # Wrong model type will raise an error
            with self.assertRaises(a ):
                AutoConfig.register('model' , a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(a ):
                AutoConfig.register('bert' , a )
            # Now that the config is registered, it can be used as any other config with the auto-API
            lowerCAmelCase__ : Union[str, Any] = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(a )
                lowerCAmelCase__ : Dict = AutoConfig.from_pretrained(a )
                self.assertIsInstance(a , a )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def _lowerCamelCase ( self : Optional[int] ):
        '''An invalid model identifier produces a helpful error message.'''
        with self.assertRaisesRegex(
            a , 'bert-base is not a local folder and is not a valid model identifier' ):
            lowerCAmelCase__ : Any = AutoConfig.from_pretrained('bert-base' )

    def _lowerCamelCase ( self : Optional[int] ):
        '''An invalid revision produces a helpful error message.'''
        with self.assertRaisesRegex(
            a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(a , revision='aaaaaa' )

    def _lowerCamelCase ( self : List[Any] ):
        '''A repo without config.json produces a helpful error message.'''
        with self.assertRaisesRegex(
            a , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
            lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )

    def _lowerCamelCase ( self : Any ):
        '''Remote (dynamic) configs require trust_remote_code and round-trip through save/load.'''
        with self.assertRaises(a ):
            lowerCAmelCase__ : int = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(a ):
            lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
        lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
        self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(a )
            lowerCAmelCase__ : str = AutoConfig.from_pretrained(a , trust_remote_code=a )
        self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )

    def _lowerCamelCase ( self : Optional[Any] ):
        '''A locally registered config shadows remote code unless trust_remote_code=True.'''

        class A__ ( __magic_name__ ):
            # Corrupted local config class; `lowercase` was presumably `model_type`.
            lowercase = 'new-model'

        try:
            AutoConfig.register('new-model' , a )
            # If remote code is not set, the default is to use local
            lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
            self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
            # If remote code is disabled, we load the local one.
            lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
            self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
            # If remote is enabled, we load from the Hub
            lowerCAmelCase__ : Dict = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=a )
            self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
307
# A Heun-style (2nd-order) Karras diffusion scheduler in the diffusers pattern:
# a beta-schedule helper plus a SchedulerMixin/ConfigMixin class with
# set_timesteps / scale_model_input / step / add_noise.
# NOTE(review): identifiers were machine-renamed; several defs repeat the same
# parameter name (e.g. `__init__(self, a, a, ...)`), which is not valid Python,
# and bodies read the pre-rename names (`betas`, `step_index`, `sigma`, ...).
# The order-sensitive numeric logic is annotated but left byte-level intact.
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
    # Build a beta schedule from a cumulative-alpha function ("betas_for_alpha_bar"):
    # beta_i = 1 - alpha_bar((i+1)/T) / alpha_bar(i/T), capped at max_beta.
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )

    lowerCAmelCase__ : Tuple = []
    for i in range(SCREAMING_SNAKE_CASE_ ):
        lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
        lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
    return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )


class A__ ( __magic_name__ , __magic_name__ ):
    # Exposed scheduler compatibility list and solver order (2 = Heun).
    lowercase = [e.name for e in KarrasDiffusionSchedulers]
    lowercase = 2

    @register_to_config
    def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
        '''Build betas per the chosen schedule, derive alphas_cumprod, and seed timesteps.'''
        if trained_betas is not None:
            lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
        elif beta_schedule == "linear":
            lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            lowerCAmelCase__ : Union[str, Any] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
        elif beta_schedule == "exp":
            lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )

        lowerCAmelCase__ : int = 1.0 - self.betas
        lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )

        # set all values
        self.set_timesteps(a , a , a )
        lowerCAmelCase__ : Optional[Any] = use_karras_sigmas

    def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
        '''Map a timestep value to its index in the (possibly duplicated) schedule.'''
        if schedule_timesteps is None:
            lowerCAmelCase__ : List[str] = self.timesteps

        lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
        else:
            lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
            lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def _lowerCamelCase ( self : Dict ):
        '''Initial noise scale; depends on the configured timestep spacing.'''
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
        '''Scale the model input by 1/sqrt(sigma^2 + 1) for the current timestep.'''
        lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
        lowerCAmelCase__ : Any = self.sigmas[step_index]
        lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
        '''Precompute the sigma/timestep schedule for `num_inference_steps` on `device`.'''
        lowerCAmelCase__ : Any = num_inference_steps
        lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )

        # sigma(t) = sqrt((1 - alpha_bar) / alpha_bar), interpolated to the chosen timesteps.
        lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        lowerCAmelCase__ : List[Any] = np.log(a )
        lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )

        if self.config.use_karras_sigmas:
            lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
            lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )

        # Heun interleaving: every interior sigma/timestep appears twice
        # (predictor + corrector evaluation).
        lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
        lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )

        lowerCAmelCase__ : Tuple = torch.from_numpy(a )
        lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )

        if str(a ).startswith('mps' ):
            # mps does not support float64
            lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
        else:
            lowerCAmelCase__ : Any = timesteps.to(device=a )

        # empty dt and derivative
        lowerCAmelCase__ : str = None
        lowerCAmelCase__ : Optional[int] = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        lowerCAmelCase__ : Optional[Any] = defaultdict(a )

    def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
        '''Invert sigma -> continuous timestep by log-linear interpolation.'''
        lowerCAmelCase__ : Optional[Any] = np.log(a )

        # get distribution
        lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        lowerCAmelCase__ : List[str] = low_idx + 1

        lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
        lowerCAmelCase__ : Any = log_sigmas[high_idx]

        # interpolate sigmas
        lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
        lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )

        # transform interpolation to time range
        lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
        lowerCAmelCase__ : Any = t.reshape(sigma.shape )
        return t

    def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
        '''Karras et al. noise schedule: interpolate sigma^(1/rho) linearly, rho=7.'''
        lowerCAmelCase__ : float = in_sigmas[-1].item()
        lowerCAmelCase__ : float = in_sigmas[0].item()

        lowerCAmelCase__ : Tuple = 7.0  # 7.0 is the value used in the paper
        lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
        lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
        lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
        lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def _lowerCamelCase ( self : Any ):
        '''True while the scheduler is waiting to take the 1st-order (Euler) half-step.'''
        return self.dt is None

    def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
        '''One Heun step: 1st-order predictor, then averaged-derivative corrector.'''
        lowerCAmelCase__ : List[str] = self.index_for_timestep(a )

        # advance index counter by 1
        lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
            lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            lowerCAmelCase__ : int = self.sigmas[step_index - 1]
            lowerCAmelCase__ : Any = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        lowerCAmelCase__ : Optional[int] = 0
        lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
            lowerCAmelCase__ : Any = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
            lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            lowerCAmelCase__ : int = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )

        if self.config.clip_sample:
            lowerCAmelCase__ : str = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat

            # store for 2nd order step
            lowerCAmelCase__ : List[Any] = derivative
            lowerCAmelCase__ : str = dt
            lowerCAmelCase__ : Dict = sample
        else:
            # 2. 2nd order / Heun's method
            lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
            lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            lowerCAmelCase__ : Dict = self.dt
            lowerCAmelCase__ : Optional[int] = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            lowerCAmelCase__ : List[str] = None
            lowerCAmelCase__ : Tuple = None
            lowerCAmelCase__ : str = None

        lowerCAmelCase__ : Tuple = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=a )

    def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
        '''Forward-diffuse clean samples: x_t = x_0 + sigma(t) * noise.'''
        lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(a ):
            # mps does not support float64
            lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
            lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )

        lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]

        # Broadcast sigma to the sample's rank before mixing in the noise.
        lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )

        lowerCAmelCase__ : List[str] = original_samples + noise * sigma
        return noisy_samples

    def __len__( self : int ):
        '''Number of training timesteps configured for this scheduler.'''
        return self.config.num_train_timesteps
307
1
# Tests for transformers' framework-agnostic tensor helpers (flatten_dict,
# transpose, reshape, squeeze, expand_dims) against numpy / torch / tf / jax.
# NOTE(review): identifiers were machine-renamed — inputs are bound to
# `lowerCAmelCase__` but the assertions pass the free name `a` (and one test
# reads `x`), so individual calls are unresolved in this view; the *pattern*
# of each test (helper result vs. native framework op) is still clear.
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)

if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class A__ ( unittest.TestCase ):
    def _lowerCamelCase ( self : Any ):
        '''flatten_dict turns nested dicts into dotted-key flat dicts.'''
        lowerCAmelCase__ : Optional[Any] = {
            'task_specific_params': {
                'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
                'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
                'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
            }
        }
        lowerCAmelCase__ : int = {
            'task_specific_params.summarization.length_penalty': 1.0,
            'task_specific_params.summarization.max_length': 128,
            'task_specific_params.summarization.min_length': 12,
            'task_specific_params.summarization.num_beams': 4,
            'task_specific_params.summarization_cnn.length_penalty': 2.0,
            'task_specific_params.summarization_cnn.max_length': 142,
            'task_specific_params.summarization_cnn.min_length': 56,
            'task_specific_params.summarization_cnn.num_beams': 4,
            'task_specific_params.summarization_xsum.length_penalty': 1.0,
            'task_specific_params.summarization_xsum.max_length': 62,
            'task_specific_params.summarization_xsum.min_length': 11,
            'task_specific_params.summarization_xsum.num_beams': 6,
        }
        self.assertEqual(flatten_dict(a ) , a )

    def _lowerCamelCase ( self : Dict ):
        '''transpose on numpy arrays matches ndarray.transpose (default and explicit axes).'''
        lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(a ) , x.transpose() ) )
        lowerCAmelCase__ : List[str] = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )

    @require_torch
    def _lowerCamelCase ( self : List[Any] ):
        '''transpose agrees between numpy input and torch tensor input.'''
        lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
        lowerCAmelCase__ : List[Any] = torch.tensor(a )
        self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
        lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ : int = torch.tensor(a )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )

    @require_tf
    def _lowerCamelCase ( self : Optional[Any] ):
        '''transpose agrees between numpy input and tf constant input.'''
        lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
        lowerCAmelCase__ : Any = tf.constant(a )
        self.assertTrue(np.allclose(transpose(a ) , transpose(a ).numpy() ) )
        lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ : Dict = tf.constant(a )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , transpose(a , axes=(1, 2, 0) ).numpy() ) )

    @require_flax
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''transpose agrees between numpy input and jax array input.'''
        lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
        lowerCAmelCase__ : int = jnp.array(a )
        self.assertTrue(np.allclose(transpose(a ) , np.asarray(transpose(a ) ) ) )
        lowerCAmelCase__ : Any = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ : str = jnp.array(a )
        self.assertTrue(np.allclose(transpose(a , axes=(1, 2, 0) ) , np.asarray(transpose(a , axes=(1, 2, 0) ) ) ) )

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''reshape on numpy arrays matches np.reshape.'''
        lowerCAmelCase__ : Any = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.reshape(a , (4, 3) ) ) )
        lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.reshape(a , (12, 5) ) ) )

    @require_torch
    def _lowerCamelCase ( self : Optional[Any] ):
        '''reshape agrees between numpy input and torch tensor input.'''
        lowerCAmelCase__ : Optional[Any] = np.random.randn(3 , 4 )
        lowerCAmelCase__ : Dict = torch.tensor(a )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
        lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ : str = torch.tensor(a )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )

    @require_tf
    def _lowerCamelCase ( self : List[str] ):
        '''reshape agrees between numpy input and tf constant input.'''
        lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
        lowerCAmelCase__ : List[Any] = tf.constant(a )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , reshape(a , (4, 3) ).numpy() ) )
        lowerCAmelCase__ : Dict = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ : Any = tf.constant(a )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , reshape(a , (12, 5) ).numpy() ) )

    @require_flax
    def _lowerCamelCase ( self : Any ):
        '''reshape agrees between numpy input and jax array input.'''
        lowerCAmelCase__ : Dict = np.random.randn(3 , 4 )
        lowerCAmelCase__ : List[str] = jnp.array(a )
        self.assertTrue(np.allclose(reshape(a , (4, 3) ) , np.asarray(reshape(a , (4, 3) ) ) ) )
        lowerCAmelCase__ : str = np.random.randn(3 , 4 , 5 )
        lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
        self.assertTrue(np.allclose(reshape(a , (12, 5) ) , np.asarray(reshape(a , (12, 5) ) ) ) )

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''squeeze on numpy arrays matches np.squeeze (default and explicit axis).'''
        lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(a ) , np.squeeze(a ) ) )
        lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.squeeze(a , axis=2 ) ) )

    @require_torch
    def _lowerCamelCase ( self : Any ):
        '''squeeze agrees between numpy input and torch tensor input.'''
        lowerCAmelCase__ : Optional[int] = np.random.randn(1 , 3 , 4 )
        lowerCAmelCase__ : str = torch.tensor(a )
        self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
        lowerCAmelCase__ : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
        lowerCAmelCase__ : Dict = torch.tensor(a )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )

    @require_tf
    def _lowerCamelCase ( self : str ):
        '''squeeze agrees between numpy input and tf constant input.'''
        lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
        lowerCAmelCase__ : Any = tf.constant(a )
        self.assertTrue(np.allclose(squeeze(a ) , squeeze(a ).numpy() ) )
        lowerCAmelCase__ : int = np.random.randn(1 , 4 , 1 , 5 )
        lowerCAmelCase__ : str = tf.constant(a )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , squeeze(a , axis=2 ).numpy() ) )

    @require_flax
    def _lowerCamelCase ( self : Optional[int] ):
        '''squeeze agrees between numpy input and jax array input.'''
        lowerCAmelCase__ : List[str] = np.random.randn(1 , 3 , 4 )
        lowerCAmelCase__ : Union[str, Any] = jnp.array(a )
        self.assertTrue(np.allclose(squeeze(a ) , np.asarray(squeeze(a ) ) ) )
        lowerCAmelCase__ : str = np.random.randn(1 , 4 , 1 , 5 )
        lowerCAmelCase__ : Optional[Any] = jnp.array(a )
        self.assertTrue(np.allclose(squeeze(a , axis=2 ) , np.asarray(squeeze(a , axis=2 ) ) ) )

    def _lowerCamelCase ( self : Any ):
        '''expand_dims on numpy arrays matches np.expand_dims.'''
        lowerCAmelCase__ : Union[str, Any] = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.expand_dims(a , axis=1 ) ) )

    @require_torch
    def _lowerCamelCase ( self : Dict ):
        '''expand_dims agrees between numpy input and torch tensor input.'''
        lowerCAmelCase__ : str = np.random.randn(3 , 4 )
        lowerCAmelCase__ : str = torch.tensor(a )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )

    @require_tf
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''expand_dims agrees between numpy input and tf constant input.'''
        lowerCAmelCase__ : Tuple = np.random.randn(3 , 4 )
        lowerCAmelCase__ : Any = tf.constant(a )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , expand_dims(a , axis=1 ).numpy() ) )

    @require_flax
    def _lowerCamelCase ( self : Optional[Any] ):
        '''expand_dims agrees between numpy input and jax array input.'''
        lowerCAmelCase__ : int = np.random.randn(3 , 4 )
        lowerCAmelCase__ : Tuple = jnp.array(a )
        self.assertTrue(np.allclose(expand_dims(a , axis=1 ) , np.asarray(expand_dims(a , axis=1 ) ) ) )
307
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]: if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ): return x return (x, x) @require_tf class A__ : def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ): '''simple docstring''' pass def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' pass def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a ) lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a ) lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : 
Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a ) lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model} lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a ) lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a ) lowerCAmelCase__ : Union[str, Any] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a ) lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a ) lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , 
attention_mask=a ) lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy() lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(a , 1E-5 ) def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : str = model( input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a ) lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions self.assertEqual(len(a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size ) lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase__ : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase__ : str = output.text_model_output.attentions self.assertEqual(len(a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ): '''simple docstring''' lowerCAmelCase__ : int = np.abs((a - b) ).max() self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**a ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Any = 
self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**a ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : str = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**a ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() self.check_save_load(**a ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**a ) @slow def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs() lowerCAmelCase__ : List[Any] = model_a(**a ) lowerCAmelCase__ : Optional[int] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(a ) lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a ) lowerCAmelCase__ : List[str] = model_a(**a ) lowerCAmelCase__ : int = after_outputs[0].numpy() lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(a , 1E-5 ) @require_tf class A__ ( __magic_name__ , unittest.TestCase ): def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' ) lowerCAmelCase__ : int = 13 lowerCAmelCase__ : List[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, 
inputs def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' ) lowerCAmelCase__ : str = TFBertModel(a , name='text_model' ) return vision_model, text_model def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = TFViTModelTester(self ) lowerCAmelCase__ : Tuple = TFBertModelTester(self ) lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( __magic_name__ , unittest.TestCase ): def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' ) lowerCAmelCase__ : Tuple = 13 lowerCAmelCase__ : Any = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def 
_lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : Any = model( input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a ) lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions self.assertEqual(len(a ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size ) lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size ) lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase__ : int = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase__ : List[str] = output.text_model_output.attentions self.assertEqual(len(a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCamelCase ( self : int , a : Optional[int] , a : int ): '''simple docstring''' lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' ) lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' ) return vision_model, text_model def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Dict = TFDeiTModelTester(self ) lowerCAmelCase__ : List[str] = TFRobertaModelTester(self ) lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs ( ( lowerCAmelCase__ ) , 
( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( __magic_name__ , unittest.TestCase ): def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' ) lowerCAmelCase__ : Dict = 13 lowerCAmelCase__ : str = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _lowerCamelCase ( self : str , a : int , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' ) lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' ) return vision_model, text_model def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self ) lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self ) lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ 
) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class A__ ( unittest.TestCase ): @slow def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained( 'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a ) lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ : Any = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' ) lowerCAmelCase__ : Union[str, Any] = model(**a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
307
1
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure for the (deprecated) M-CTC-T model: maps submodule name
# to the public names it exports, so heavy modules load only on first access.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; expose it only when torch is installed.
    pass
else:
    # Extend (not replace) the structure so the config/processor entries are kept.
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime these are lazy.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Install the lazy module in place of this package so attribute access
    # triggers on-demand submodule loading.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
307
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Return the number of rows of `out` whose argmax matches `labels`."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Load a ROCStories CSV and return a list of (story, cont1, cont2, label) tuples.

    The label column is 1-based in the file and converted to 0-based here.
    """
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets of (story, cont1, cont2, label) tuples into model tensors.

    For each example, builds the two candidate sequences
    ``[start] story [delim] continuation [clf]`` (each segment truncated to
    `cap_length`) and packs them into fixed-size arrays of width `input_len`.

    Returns a list of (input_ids, mc_token_ids, lm_labels, mc_labels) tensor tuples,
    one per input dataset. LM label positions beyond the sequence stay at -100
    so they are ignored by the loss.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # mc_token_ids points at the [clf] token, whose hidden state feeds the MC head.
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Fine-tune and/or evaluate OpenAI GPT with a double-heads model on ROCStories."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps',
        default=-1,
        type=int,
        help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ),
    )
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        # Recursively tokenize strings, pass ints through, and map over containers.
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('Encoding dataset...')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # Combined objective: weighted LM loss plus multiple-choice loss.
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))


if __name__ == "__main__":
    main()
307
1
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Return the number of rows of `out` whose argmax equals the label.

    Args:
        out: (n, num_choices) array of multiple-choice logits.
        labels: (n,) array of gold choice indices.
    """
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Load a RocStories cloze CSV into (story, cont1, cont2, label) tuples.

    The label column is 1-based in the file, so 1 is subtracted to make it 0-based.
    """
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process token-id datasets into padded model-input tensors.

    For each (story, cont1, cont2, mc_label) example, builds the two candidate
    sequences ``[start] story [delim] contN [clf]`` and returns, per dataset, a
    tuple of tensors: (input_ids, mc_token_ids, lm_labels, mc_labels).
    Positions outside the real sequence keep lm_label -100 so they are ignored
    by the LM loss.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            # mc_token_ids points at the classification token (last real position)
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        # BUGFIX: tensor each array `t`, not the loop-external placeholder
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    """Fine-tune OpenAIGPTDoubleHeadsModel on the RocStories dataset and/or evaluate it."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model.
    # This loading function also adds new tokens and embeddings called `special tokens`;
    # these new embeddings will be fine-tuned on the RocStories dataset.
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets (recursively tokenizes nested tuples/lists).
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
307
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    """Check at call time that the installed version of `pkg` satisfies the pinned requirement.

    Args:
        pkg: dependency name; must be a key of `deps`.
        hint: optional extra text appended to the error message on mismatch.
    """
    # BUGFIX: read the function's own `pkg` parameter, not the module-level
    # loop variable leaked by the `for pkg in ...` loop above.
    require_version(deps[pkg], hint)
307
1
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    """Configuration class for a GPT-J model.

    Stores the architecture hyper-parameters; defaults correspond to
    EleutherAI/gpt-j-6B. `attribute_map` aliases the generic config attribute
    names onto the GPT-2-style `n_*` names used here.
    """

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J, with optional past-key-values support."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for ONNX export, including zeroed past_key_values when `use_past`."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask over the past positions, matching its dtype
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Tests for DPMSolverSDEScheduler: config sweeps plus full denoising-loop
    regression checks against per-device reference sums/means."""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1], [0.0_0_0_2, 0.0_0_2, 0.0_2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5) < 1e-2
            assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6) < 1e-2
            assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2) < 1e-3
        else:
            assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2) < 1e-2
            assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3) < 1e-2
            assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3) < 1e-2
            assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7) < 1e-3
        else:
            assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5) < 1e-2
            assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8) < 1e-2
            assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2) < 1e-2
            assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1) < 1e-3
        else:
            assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2) < 1e-2
            assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8) < 1e-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5) < 1e-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1e-2
        else:
            assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2) < 1e-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1e-2
307
1
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class A__ ( __magic_name__ , unittest.TestCase ): lowercase = BertTokenizer lowercase = BertTokenizerFast lowercase = True lowercase = True lowercase = filter_non_english def _lowerCamelCase ( self : Tuple ): '''simple docstring''' super().setUp() lowerCAmelCase__ : Union[str, Any] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def _lowerCamelCase ( self : Union[str, Any] , a : int ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = 'UNwant\u00E9d,running' lowerCAmelCase__ : int = 'unwanted, running' return input_text, output_text def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : str = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ : Optional[int] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [9, 6, 7, 12, 10, 11] ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ : Optional[Any] = self.get_tokenizer() lowerCAmelCase__ : List[str] = self.get_rust_tokenizer() lowerCAmelCase__ : Union[str, Any] = 'UNwant\u00E9d,running' lowerCAmelCase__ : List[Any] = tokenizer.tokenize(a ) lowerCAmelCase__ : int = 
rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowerCAmelCase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a ) lowerCAmelCase__ : Dict = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) lowerCAmelCase__ : str = self.get_rust_tokenizer() lowerCAmelCase__ : Optional[Any] = tokenizer.encode(a ) lowerCAmelCase__ : str = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) # With lower casing lowerCAmelCase__ : int = self.get_tokenizer(do_lower_case=a ) lowerCAmelCase__ : List[str] = self.get_rust_tokenizer(do_lower_case=a ) lowerCAmelCase__ : Dict = 'UNwant\u00E9d,running' lowerCAmelCase__ : Dict = tokenizer.tokenize(a ) lowerCAmelCase__ : Optional[int] = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowerCAmelCase__ : List[Any] = tokenizer.encode(a , add_special_tokens=a ) lowerCAmelCase__ : Optional[int] = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) lowerCAmelCase__ : str = self.get_rust_tokenizer() lowerCAmelCase__ : Any = tokenizer.encode(a ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=a , strip_accents=a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=a , strip_accents=a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : str = BasicTokenizer(do_lower_case=a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : str = BasicTokenizer(do_lower_case=a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=a , strip_accents=a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=a , strip_accents=a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Any = BasicTokenizer(do_lower_case=a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = BasicTokenizer() lowerCAmelCase__ : Tuple = 'a\n\'ll !!to?\'d of, can\'t.' 
lowerCAmelCase__ : Optional[Any] = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(a ) , a ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowerCAmelCase__ : Dict = {} for i, token in enumerate(a ): lowerCAmelCase__ : Union[str, Any] = i lowerCAmelCase__ : Optional[Any] = WordpieceTokenizer(vocab=a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' 
) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Dict = self.get_tokenizer() lowerCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(a ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(a ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.tokenizer_class.from_pretrained('bert-base-uncased' ) lowerCAmelCase__ : List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=a ) lowerCAmelCase__ : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a ) lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a ) lowerCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(a , a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _lowerCamelCase ( self : List[str] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(a , **a ) lowerCAmelCase__ : Optional[int] = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' lowerCAmelCase__ : List[Any] = tokenizer_r.encode_plus( a , return_attention_mask=a , return_token_type_ids=a , return_offsets_mapping=a , add_special_tokens=a , ) lowerCAmelCase__ : Tuple = tokenizer_r.do_lower_case if hasattr(a , 'do_lower_case' ) else False lowerCAmelCase__ : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), 
tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = ['的', '人', '有'] lowerCAmelCase__ : Optional[Any] = ''.join(a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase__ : str = True lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(a , **a ) lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(a , **a ) lowerCAmelCase__ : List[str] = tokenizer_p.encode(a , add_special_tokens=a ) lowerCAmelCase__ : Dict = tokenizer_r.encode(a , add_special_tokens=a ) lowerCAmelCase__ : str = tokenizer_r.convert_ids_to_tokens(a ) lowerCAmelCase__ : Tuple = tokenizer_p.convert_ids_to_tokens(a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(a , a ) self.assertListEqual(a , a ) lowerCAmelCase__ : Any = False lowerCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(a , **a ) lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained(a , **a ) lowerCAmelCase__ : int = tokenizer_r.encode(a , add_special_tokens=a ) lowerCAmelCase__ : Optional[Any] = tokenizer_p.encode(a , add_special_tokens=a ) lowerCAmelCase__ : Tuple = tokenizer_r.convert_ids_to_tokens(a ) lowerCAmelCase__ : Union[str, Any] = 
tokenizer_p.convert_ids_to_tokens(a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase__ : Optional[int] = [ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(a ) ] self.assertListEqual(a , a ) self.assertListEqual(a , a )
307
import os import string import sys lowerCamelCase__ = 1 << 8 lowerCamelCase__ = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } lowerCamelCase__ = KEYMAP["""up"""] lowerCamelCase__ = KEYMAP["""left"""] if sys.platform == "win32": lowerCamelCase__ = [] lowerCamelCase__ = { b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): lowerCamelCase__ = ord(str(i)) def lowerCAmelCase__ ( ) -> Dict: if os.name == "nt": import msvcrt lowerCAmelCase__ : Dict = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(SCREAMING_SNAKE_CASE_ ) == 0: # Read the keystroke lowerCAmelCase__ : Optional[Any] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): lowerCAmelCase__ : Dict = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: lowerCAmelCase__ : Dict = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(SCREAMING_SNAKE_CASE_ ) if ord(SCREAMING_SNAKE_CASE_ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) lowerCAmelCase__ : Dict = chr(KEYMAP['esc'] ) except KeyError: lowerCAmelCase__ : Dict = cha[1] else: lowerCAmelCase__ : List[Any] = ch.decode(SCREAMING_SNAKE_CASE_ ) else: lowerCAmelCase__ : Tuple = 
WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty lowerCAmelCase__ : Tuple = sys.stdin.fileno() lowerCAmelCase__ : Any = termios.tcgetattr(SCREAMING_SNAKE_CASE_ ) try: tty.setraw(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Optional[int] = sys.stdin.read(1 ) finally: termios.tcsetattr(SCREAMING_SNAKE_CASE_ , termios.TCSADRAIN , SCREAMING_SNAKE_CASE_ ) return ch def lowerCAmelCase__ ( ) -> Union[str, Any]: lowerCAmelCase__ : Any = get_raw_chars() if ord(SCREAMING_SNAKE_CASE_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["esc"]: lowerCAmelCase__ : Union[str, Any] = get_raw_chars() if ord(SCREAMING_SNAKE_CASE_ ) == KEYMAP["mod_int"]: lowerCAmelCase__ : str = get_raw_chars() if ord(SCREAMING_SNAKE_CASE_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(SCREAMING_SNAKE_CASE_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(SCREAMING_SNAKE_CASE_ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
307
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class A__ ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Dict = 'ZinengTang/tvlt-base' lowerCAmelCase__ : Any = tempfile.mkdtemp() def _lowerCamelCase ( self : Union[str, Any] , **a : Optional[int] ): '''simple docstring''' return TvltImageProcessor.from_pretrained(self.checkpoint , **a ) def _lowerCamelCase ( self : List[str] , **a : Any ): '''simple docstring''' return TvltFeatureExtractor.from_pretrained(self.checkpoint , **a ) def _lowerCamelCase ( self : Any ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Tuple = self.get_image_processor() lowerCAmelCase__ : int = self.get_feature_extractor() lowerCAmelCase__ : List[str] = TvltProcessor(image_processor=a , feature_extractor=a ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ : Any = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , a ) self.assertIsInstance(processor.image_processor , a ) def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : int = self.get_image_processor() lowerCAmelCase__ : str = self.get_feature_extractor() lowerCAmelCase__ : List[str] = TvltProcessor(image_processor=a , feature_extractor=a ) lowerCAmelCase__ : List[str] = np.ones([12_000] ) lowerCAmelCase__ : Dict = feature_extractor(a , return_tensors='np' ) lowerCAmelCase__ : Dict = processor(audio=a , return_tensors='np' ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , 
input_processor[key].sum() , delta=1E-2 ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = self.get_image_processor() lowerCAmelCase__ : Dict = self.get_feature_extractor() lowerCAmelCase__ : List[str] = TvltProcessor(image_processor=a , feature_extractor=a ) lowerCAmelCase__ : Optional[int] = np.ones([3, 224, 224] ) lowerCAmelCase__ : List[str] = image_processor(a , return_tensors='np' ) lowerCAmelCase__ : int = processor(images=a , return_tensors='np' ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Dict = self.get_image_processor() lowerCAmelCase__ : List[str] = self.get_feature_extractor() lowerCAmelCase__ : Dict = TvltProcessor(image_processor=a , feature_extractor=a ) lowerCAmelCase__ : List[str] = np.ones([12_000] ) lowerCAmelCase__ : Optional[Any] = np.ones([3, 224, 224] ) lowerCAmelCase__ : int = processor(audio=a , images=a ) self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] ) # test if it raises when no input is passed with pytest.raises(a ): processor() def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.get_image_processor() lowerCAmelCase__ : Union[str, Any] = self.get_feature_extractor() lowerCAmelCase__ : List[str] = TvltProcessor(image_processor=a , feature_extractor=a ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
307
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str: return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0: raise ValueError( 'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE_ ) <= set('0123456789ABCDEF' ): raise ValueError( 'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
307
1
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position lowerCamelCase__ = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils 
import download_manager as _deprecated_download_manager # isort:skip lowerCamelCase__ = concatenate_datasets lowerCamelCase__ = DownloadConfig lowerCamelCase__ = DownloadManager lowerCamelCase__ = DownloadMode lowerCamelCase__ = DownloadConfig lowerCamelCase__ = DownloadMode lowerCamelCase__ = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
307
from __future__ import annotations def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[list[int]]: lowerCAmelCase__ : list[list[int]] = [] create_all_state(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , [] , SCREAMING_SNAKE_CASE_ ) return result def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> None: if level == 0: total_list.append(current_list[:] ) return for i in range(SCREAMING_SNAKE_CASE_ , total_number - level + 2 ): current_list.append(SCREAMING_SNAKE_CASE_ ) create_all_state(i + 1 , SCREAMING_SNAKE_CASE_ , level - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) current_list.pop() def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> None: for i in total_list: print(*SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowerCamelCase__ = 4 lowerCamelCase__ = 2 lowerCamelCase__ = generate_all_combinations(n, k) print_all_state(total_list)
307
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json""" ), }, } lowerCamelCase__ = { """yjernite/retribert-base-uncased""": 512, } lowerCamelCase__ = { """yjernite/retribert-base-uncased""": {"""do_lower_case""": True}, } class A__ ( __magic_name__ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = RetriBertTokenizer lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , a : Tuple=None , a : Dict=None , a : int=True , a : List[str]="[UNK]" , a : str="[SEP]" , a : List[Any]="[PAD]" , a : str="[CLS]" , a : int="[MASK]" , a : Optional[int]=True , a : Tuple=None , **a : Optional[Any] , ): '''simple docstring''' super().__init__( a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , ) lowerCAmelCase__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , a ) != do_lower_case or normalizer_state.get('strip_accents' , a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars ): lowerCAmelCase__ : Union[str, Any] = getattr(a , normalizer_state.pop('type' ) ) lowerCAmelCase__ : 
List[Any] = do_lower_case lowerCAmelCase__ : List[str] = strip_accents lowerCAmelCase__ : Optional[int] = tokenize_chinese_chars lowerCAmelCase__ : int = normalizer_class(**a ) lowerCAmelCase__ : List[str] = do_lower_case def _lowerCamelCase ( self : Any , a : List[str] , a : Optional[int]=None ): '''simple docstring''' lowerCAmelCase__ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCamelCase ( self : int , a : List[int] , a : Optional[List[int]] = None ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = [self.sep_token_id] lowerCAmelCase__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowerCamelCase ( self : Dict , a : str , a : Optional[str] = None ): '''simple docstring''' lowerCAmelCase__ : Dict = self._tokenizer.model.save(a , name=a ) return tuple(a )
307
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class GenerationConfigTest(unittest.TestCase):
    """Tests for `GenerationConfig` serialization, updating and kwarg handling.

    NOTE(review): the obfuscated original bound every value to a single name
    (`lowerCAmelCase__`) while the assertions referenced `config`,
    `loaded_config`, etc. — restored here so the references resolve.
    """

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        """Round-trips a config through ``save_pretrained``/``from_pretrained``."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        """A generation config built from a model config inherits its special tokens."""
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        """``update()`` mutates valid attributes and returns unused kwargs untouched."""
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        """Ad-hoc attributes survive save/load but are not copied from model configs."""
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        """Keyword arguments override defaults at init and at ``from_pretrained`` time."""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Hub round-trip tests for `GenerationConfig` (requires staging credentials)."""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of any repos the tests may have left behind.
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
307
1
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    """Supported decoding heads of the MGP-STR model."""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Processor wrapping a ViT image processor and the three MGP-STR tokenizers
    (character, BPE and WordPiece) used to decode the model's parallel heads.
    """

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        # Auxiliary tokenizers for the BPE and WordPiece decoding heads.
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare images and/or text; with both, the char token ids go to ``labels``."""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decode the three heads' logits and keep, per sample, the highest-confidence string.

        Args:
            sequences: tuple of (char_logits, bpe_logits, wp_logits) tensors.

        Returns:
            dict with ``generated_text``, ``scores`` and the per-head decodings.
        """
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits up to its EOS marker, with a cumulative
        softmax confidence score per sequence."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        # top-1 token per position; drop the BOS slot (position 0).
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of per-step max probabilities; 0.0 for empty output.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Decode character-tokenizer ids, stripping the spaces the decoder inserts."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode BPE-tokenizer ids."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode WordPiece-tokenizer ids, stripping the spaces the decoder inserts."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
307
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps

from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `UnCLIPImageVariationPipeline` built from tiny dummy models."""

    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently from `dummy_super_res_first` on purpose
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests against the published Karlo image-variation checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
307
1
def method_a(boundary, steps):
    """Approximate the integral of the global ``f`` over ``boundary`` using the
    extended trapezoidal rule: int(f) ~= h/2 * (f(a) + 2*f(x_1) + ... + f(b)).

    Args:
        boundary: two-element sequence [a, b] with the integration limits.
        steps: number of subintervals (h = (b - a) / steps).

    Returns:
        The trapezoidal-rule estimate of the integral.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # interior points get full weight h
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ... below ``b - h``.

    NOTE: relies on floating-point accumulation; with steps like 0.1 the last
    interior point is included only because the running sum lands just under b-h.
    """
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 trapezoids and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
307
def stooge_sort(arr):
    """Sort ``arr`` in place using stooge sort and return it.

    Stooge sort is a deliberately inefficient O(n^2.71) recursive sort,
    useful only for teaching purposes.
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort the inclusive slice ``arr[i..h]`` in place."""
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort first 2/3 elements again
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
307
1
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool: lowerCAmelCase__ : int = int(number**0.5 ) return number == sq * sq def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> tuple[int, int]: lowerCAmelCase__ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den lowerCAmelCase__ : int = x_den * y_den * z_den lowerCAmelCase__ : int = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) top //= hcf bottom //= hcf return top, bottom def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 35 ) -> int: lowerCAmelCase__ : set = set() lowerCAmelCase__ : int lowerCAmelCase__ : Fraction = Fraction(0 ) lowerCAmelCase__ : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 lowerCAmelCase__ : Any = x_num * y_den + x_den * y_num lowerCAmelCase__ : Dict = x_den * y_den lowerCAmelCase__ : Optional[Any] = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCAmelCase__ : Dict = add_three( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) unique_s.add(SCREAMING_SNAKE_CASE_ ) # n=2 lowerCAmelCase__ : int = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) lowerCAmelCase__ : Tuple = x_den * x_den * y_den * y_den if is_sq(SCREAMING_SNAKE_CASE_ ) and is_sq(SCREAMING_SNAKE_CASE_ ): lowerCAmelCase__ : Optional[int] = int(sqrt(SCREAMING_SNAKE_CASE_ ) ) lowerCAmelCase__ : Tuple = int(sqrt(SCREAMING_SNAKE_CASE_ ) ) lowerCAmelCase__ : List[Any] = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCAmelCase__ 
: Union[str, Any] = add_three( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) unique_s.add(SCREAMING_SNAKE_CASE_ ) # n=-1 lowerCAmelCase__ : Tuple = x_num * y_num lowerCAmelCase__ : Union[str, Any] = x_den * y_num + x_num * y_den lowerCAmelCase__ : Optional[int] = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCAmelCase__ : Dict = add_three( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) unique_s.add(SCREAMING_SNAKE_CASE_ ) # n=2 lowerCAmelCase__ : Any = x_num * x_num * y_num * y_num lowerCAmelCase__ : Optional[Any] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(SCREAMING_SNAKE_CASE_ ) and is_sq(SCREAMING_SNAKE_CASE_ ): lowerCAmelCase__ : Tuple = int(sqrt(SCREAMING_SNAKE_CASE_ ) ) lowerCAmelCase__ : int = int(sqrt(SCREAMING_SNAKE_CASE_ ) ) lowerCAmelCase__ : List[str] = gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: lowerCAmelCase__ : Any = add_three( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) unique_s.add(SCREAMING_SNAKE_CASE_ ) for num, den in unique_s: total += Fraction(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
307
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Base import structure; optional-dependency modules are appended below.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Lazily resolve submodules/attributes on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
307
1
import copy
import os
from typing import TYPE_CHECKING, List, Union

if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}


# BUGFIX(review): the three config classes below were all named `A__` (each
# definition shadowing the previous), inherited from an undefined
# `__magic_name__`, and declared every __init__ parameter with the same name
# `a` (a SyntaxError).  Class and parameter names are reconstructed from the
# attribute assignments in each body; the base class is the imported
# `PretrainedConfig`.  `A__` is kept as a backward-compatible alias below.


class AlignTextConfig(PretrainedConfig):
    """Configuration for the ALIGN text tower (BERT-style encoder)."""

    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        use_cache: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config, extracting the text sub-config when given a full ALIGN config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    """Configuration for the ALIGN vision tower (EfficientNet-style encoder)."""

    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        # NOTE: the list defaults below are shared across calls; they are
        # treated as read-only in this file.
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat expands to 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config, extracting the vision sub-config when given a full ALIGN config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    """Composite configuration combining the text and vision sub-configs."""

    model_type = "align"
    # NOTE(review): the second obfuscated class attribute was a bare `True`;
    # `is_composition` matches the composite-config convention — confirm.
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        """Build an AlignConfig from the two sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias: the previous auto-generated name `A__` resolved
# to the last class defined (the composite config).
A__ = AlignConfig
307
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericUtilTests(unittest.TestCase):
    """Tests for `flatten_dict` and the framework-agnostic tensor helpers
    (`transpose`, `reshape`, `squeeze`, `expand_dims`) against their
    numpy / torch / tensorflow / jax counterparts.

    BUGFIX(review): every method here was previously named `_lowerCamelCase`,
    so each definition shadowed the previous one and no method carried the
    `test_` prefix — unittest discovered zero tests.  Methods are renamed,
    one per framework.
    """

    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
307
1
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for an Informer time-series transformer.

    BUGFIX(review): the class previously inherited from an undefined
    `__magic_name__` (the intended base is the imported `PretrainedConfig`)
    and declared every __init__ parameter with the same name `a` (a
    SyntaxError).  Parameter names are reconstructed from the attribute
    assignments in the body.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the conditioning window to the prediction horizon.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-timestep feature vector fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
307
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

# BUGFIX(review): the version constant and the seven deprecated aliases at the
# bottom of this file were all bound to the same throwaway name
# (`lowerCamelCase__`), so `datasets.__version__` was missing and the
# deprecated attribute locations were never populated.  Restored below.
__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Re-expose names at their historical (deprecated) locations for backward
# compatibility.  The right-hand sides match the original assignment order.
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
307
1
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyVaaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for the Kandinsky 2.2 decoder pipeline with tiny dummy models.

    BUGFIX(review): the class previously inherited from an undefined
    `__magic_name__`, bound every mixin attribute to the same name
    `lowercase` (shadowing), and named every method `_lowerCamelCase` (so
    only the last survived and unittest discovered no tests).  Attribute and
    method names expected by `PipelineTesterMixin`/unittest are restored.
    Keyword values marked "restored" below replaced references to an
    undefined name `a` and follow upstream defaults — confirm against the
    sibling Kandinsky tests.
    """

    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # Base channel count of the dummy UNet.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,  # restored
            set_alpha_to_one=False,  # restored
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,  # restored
        )

        return {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.floataa
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
307
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for DanceDiffusion with a tiny dummy UNet.

    BUGFIX(review): the class previously inherited from an undefined
    `__magic_name__`, bound every mixin attribute to the same name
    `lowercase`, and named every method `_lowerCamelCase` (shadowing; no
    unittest discovery).  The mixin base is grounded by the body's reference
    to `PipelineTesterMixin.required_optional_params`; override names are
    grounded by the `super().test_*()` calls.  The two trailing `False`
    attributes are restored as `test_attention_slicing`/`test_cpu_offload`
    per upstream — confirm.
    """

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,  # restored (previous code passed an undefined name)
            use_timestep_embedding=False,  # restored
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.floataa)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
307
1
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list: if len(SCREAMING_SNAKE_CASE_ ) < 2: return collection def circle_sort_util(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool: lowerCAmelCase__ : str = False if low == high: return swapped lowerCAmelCase__ : Dict = low lowerCAmelCase__ : Any = high while left < right: if collection[left] > collection[right]: lowerCAmelCase__ , lowerCAmelCase__ : int = ( collection[right], collection[left], ) lowerCAmelCase__ : Optional[Any] = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowerCAmelCase__ , lowerCAmelCase__ : Dict = ( collection[right + 1], collection[left], ) lowerCAmelCase__ : str = True lowerCAmelCase__ : int = low + int((high - low) / 2 ) lowerCAmelCase__ : Optional[int] = circle_sort_util(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : Optional[int] = circle_sort_util(SCREAMING_SNAKE_CASE_ , mid + 1 , SCREAMING_SNAKE_CASE_ ) return swapped or left_swap or right_swap lowerCAmelCase__ : Any = True while is_not_sorted is True: lowerCAmelCase__ : Any = circle_sort_util(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) return collection if __name__ == "__main__": lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip() lowerCamelCase__ = [int(item) for item in user_input.split(""",""")] print(circle_sort(unsorted))
307
from ..utils import DummyObject, requires_backends class A__ ( metaclass=__magic_name__ ): lowercase = ['torch', 'transformers', 'onnx'] def __init__( self : Any , *a : Any , **a : Any ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Union[str, Any] , *a : Optional[int] , **a : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : int , *a : List[Any] , **a : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class A__ ( metaclass=__magic_name__ ): lowercase = ['torch', 'transformers', 'onnx'] def __init__( self : str , *a : Any , **a : Optional[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Optional[Any] , *a : Optional[Any] , **a : Any ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class A__ ( metaclass=__magic_name__ ): lowercase = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[int] , *a : List[Any] , **a : str ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : List[Any] , *a : List[str] , **a : List[str] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Optional[Any] , *a : Union[str, Any] , **a : Optional[int] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class A__ ( metaclass=__magic_name__ ): lowercase = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *a : Dict , **a : List[str] ): '''simple docstring''' requires_backends(self , ['torch', 
'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Optional[int] , *a : Dict , **a : List[Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Optional[int] , *a : List[str] , **a : Dict ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class A__ ( metaclass=__magic_name__ ): lowercase = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *a : str , **a : Union[str, Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Any , *a : Any , **a : Any ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Any , *a : List[Any] , **a : str ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class A__ ( metaclass=__magic_name__ ): lowercase = ['torch', 'transformers', 'onnx'] def __init__( self : str , *a : Union[str, Any] , **a : Optional[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : int , *a : Union[str, Any] , **a : Dict ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def _lowerCamelCase ( cls : Optional[int] , *a : Tuple , **a : List[str] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] )
307
1
from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class A__ ( __magic_name__ ): lowercase = 'mctct' def __init__( self : List[Any] , a : Optional[int]=8_065 , a : List[Any]=1_536 , a : List[str]=36 , a : Union[str, Any]=6_144 , a : Optional[Any]=4 , a : str=384 , a : int=920 , a : Any=1E-5 , a : Optional[Any]=0.3 , a : List[Any]="relu" , a : Union[str, Any]=0.0_2 , a : Union[str, Any]=0.3 , a : Any=0.3 , a : Any=1 , a : Optional[Any]=0 , a : str=2 , a : Any=1 , a : Tuple=0.3 , a : Any=1 , a : str=(7,) , a : Tuple=(3,) , a : Dict=80 , a : Dict=1 , a : str=None , a : Dict="sum" , a : List[Any]=False , **a : Optional[Any] , ): '''simple docstring''' super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a ) lowerCAmelCase__ : Tuple = vocab_size lowerCAmelCase__ : str = hidden_size lowerCAmelCase__ : str = num_hidden_layers lowerCAmelCase__ : Optional[Any] = intermediate_size lowerCAmelCase__ : List[Any] = num_attention_heads lowerCAmelCase__ : Optional[int] = attention_head_dim lowerCAmelCase__ : List[str] = max_position_embeddings lowerCAmelCase__ : Union[str, Any] = layer_norm_eps lowerCAmelCase__ : Union[str, Any] = layerdrop lowerCAmelCase__ : Union[str, Any] = hidden_act lowerCAmelCase__ : Dict = initializer_range lowerCAmelCase__ : str = hidden_dropout_prob lowerCAmelCase__ : Any = attention_probs_dropout_prob lowerCAmelCase__ : int = pad_token_id lowerCAmelCase__ : int = bos_token_id lowerCAmelCase__ : Any = eos_token_id lowerCAmelCase__ : Union[str, Any] = conv_glu_dim lowerCAmelCase__ : int = conv_dropout lowerCAmelCase__ : List[str] = num_conv_layers lowerCAmelCase__ : Union[str, Any] = input_feat_per_channel lowerCAmelCase__ : List[Any] = input_channels 
lowerCAmelCase__ : List[str] = conv_channels lowerCAmelCase__ : Tuple = ctc_loss_reduction lowerCAmelCase__ : Union[str, Any] = ctc_zero_infinity # prevents config testing fail with exporting to json lowerCAmelCase__ : Optional[Any] = list(a ) lowerCAmelCase__ : str = list(a ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ' f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
307
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class A__ : def __init__( self : List[str] , a : Any , a : Dict=13 , a : Optional[Any]=7 , a : Tuple=True , a : Tuple=True , a : Dict=False , a : Optional[Any]=True , a : Dict=99 , a : Tuple=32 , a : Optional[Any]=5 , a : str=4 , a : Union[str, Any]=37 , a : Any="gelu" , a : Dict=0.1 , a : Any=0.1 , a : Optional[int]=512 , a : Union[str, Any]=16 , a : Optional[int]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=3 , a : Any=4 , a : Optional[int]=None , ): '''simple docstring''' lowerCAmelCase__ : List[str] = parent lowerCAmelCase__ : str = batch_size lowerCAmelCase__ : Optional[int] = seq_length lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : Tuple = use_input_mask lowerCAmelCase__ : List[Any] = use_token_type_ids lowerCAmelCase__ : str = use_labels lowerCAmelCase__ : Dict = vocab_size lowerCAmelCase__ : Union[str, Any] = hidden_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : List[Any] = num_attention_heads lowerCAmelCase__ : int = intermediate_size lowerCAmelCase__ : Union[str, Any] = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Any = attention_probs_dropout_prob lowerCAmelCase__ : Dict = max_position_embeddings lowerCAmelCase__ : int = type_vocab_size lowerCAmelCase__ : int = type_sequence_label_size lowerCAmelCase__ : Dict = initializer_range lowerCAmelCase__ : List[str] = num_labels 
lowerCAmelCase__ : Any = num_choices lowerCAmelCase__ : List[Any] = scope def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : Tuple = None if self.use_input_mask: lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : List[str] = None if self.use_token_type_ids: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : str = None if self.use_labels: lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self : Tuple , a : Dict , a : List[str] , a : str , a : Union[str, Any] , a : Optional[Any] , a : Dict , a : str ): '''simple docstring''' lowerCAmelCase__ : str = LlamaModel(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Dict = model(a , attention_mask=a ) lowerCAmelCase__ : Union[str, Any] = model(a ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self : int , a : Any , a : Union[str, Any] , a : Dict , a : Dict , a : List[Any] , a : Optional[Any] , a : int , a : Dict , a : Tuple , ): '''simple docstring''' lowerCAmelCase__ : int = True lowerCAmelCase__ : Dict = LlamaModel(a ) model.to(a ) model.eval() lowerCAmelCase__ : List[Any] = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , ) lowerCAmelCase__ : Optional[int] = model( a , attention_mask=a , encoder_hidden_states=a , ) lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self : Union[str, Any] , a : int , a : List[Any] , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] , a : Any , a : List[str] , a : List[str] , ): '''simple docstring''' lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a ) model.to(a ) model.eval() lowerCAmelCase__ : Tuple = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : str , a : Union[str, Any] , a : Optional[Any] , a : List[Any] , a : Optional[Any] , a : Optional[Any] , a : List[str] , ): '''simple docstring''' lowerCAmelCase__ : str = True lowerCAmelCase__ : Optional[int] = True lowerCAmelCase__ : List[Any] = LlamaForCausalLM(config=a ) model.to(a ) model.eval() # first forward pass lowerCAmelCase__ : List[str] = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , ) lowerCAmelCase__ : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase__ : Tuple = 
ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase__ : int = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase__ : Any = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase__ : Union[str, Any] = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0] lowerCAmelCase__ : Union[str, Any] = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0] # select random slice lowerCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase__ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) ) def _lowerCamelCase ( self : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Any = config_and_inputs lowerCAmelCase__ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () lowercase = (LlamaForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': LlamaModel, 'text-classification': LlamaForSequenceClassification, 'text-generation': LlamaForCausalLM, 'zero-shot': LlamaForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False def 
_lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = LlamaModelTester(self ) lowerCAmelCase__ : str = ConfigTester(self , config_class=a , hidden_size=37 ) def _lowerCamelCase ( self : str ): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase__ : int = type self.model_tester.create_and_check_model(*a ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : int = 3 lowerCAmelCase__ : Dict = input_dict['input_ids'] lowerCAmelCase__ : Optional[Any] = input_ids.ne(1 ).to(a ) lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase__ : Tuple = LlamaForSequenceClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : str = model(a , attention_mask=a , labels=a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : List[Any] = 3 lowerCAmelCase__ : List[str] = 'single_label_classification' lowerCAmelCase__ : List[Any] = input_dict['input_ids'] lowerCAmelCase__ : List[Any] = input_ids.ne(1 ).to(a ) lowerCAmelCase__ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase__ : int = LlamaForSequenceClassification(a ) model.to(a ) 
model.eval() lowerCAmelCase__ : Optional[int] = model(a , attention_mask=a , labels=a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Optional[Any] = 3 lowerCAmelCase__ : Optional[Any] = 'multi_label_classification' lowerCAmelCase__ : List[str] = input_dict['input_ids'] lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(a ) lowerCAmelCase__ : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase__ : Dict = LlamaForSequenceClassification(a ) model.to(a ) model.eval() lowerCAmelCase__ : Dict = model(a , attention_mask=a , labels=a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def _lowerCamelCase ( self : Optional[int] , a : Dict ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Tuple = ids_tensor([1, 10] , config.vocab_size ) lowerCAmelCase__ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase__ : List[Any] = LlamaModel(a ) original_model.to(a ) original_model.eval() lowerCAmelCase__ : List[Any] = original_model(a ).last_hidden_state lowerCAmelCase__ : str = original_model(a ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase__ : Any = {'type': scaling_type, 'factor': 1_0.0} 
lowerCAmelCase__ : Union[str, Any] = LlamaModel(a ) scaled_model.to(a ) scaled_model.eval() lowerCAmelCase__ : Union[str, Any] = scaled_model(a ).last_hidden_state lowerCAmelCase__ : Optional[int] = scaled_model(a ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a , a , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(a , a , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(a , a , atol=1E-5 ) ) @require_torch class A__ ( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] ) torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will 
update!' ) @slow def _lowerCamelCase ( self : Tuple ): '''simple docstring''' lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) lowerCAmelCase__ : List[str] = model(torch.tensor(a ) ) # Expected mean on dim = -1 lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] ) torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) lowerCAmelCase__ : str = model(torch.tensor(a ) ) # Expected mean on dim = -1 lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] ) torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) lowerCAmelCase__ : List[str] = model(torch.tensor(a ) ) lowerCAmelCase__ : int = torch.tensor( [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 ) # fmt: off lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Model is curently gated' ) @slow def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that ' lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' ) lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a ) # greedy generation outputs lowerCAmelCase__ : 
Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a ) lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a ) self.assertEqual(a , a )
307
1
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Return a dict mapping TensorFlow checkpoint variable names to PyTorch parameters.

    The TF checkpoint layout is one `Conv2d_{i}` scope per stem/depthwise/pointwise
    convolution, each with a nested `BatchNorm` scope.
    """
    tf_to_pt_map = {}

    # The classification head (if any) lives outside the backbone.
    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        # Each TF block corresponds to a (depthwise, pointwise) pair in `backbone.layer`.
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            # TF depthwise kernels are (H, W, in, multiplier); PyTorch wants (in, multiplier, H, W).
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                # TF conv kernels are (H, W, in, out); PyTorch wants (out, in, H, W).
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the optimizer/EMA shadow variables so the final report only lists
        # genuinely unmapped weights.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad `features` like TensorFlow's "SAME" padding for the given conv layer.

    Unlike PyTorch's symmetric padding, TF puts the extra pixel (when the total
    padding is odd) on the bottom/right edge.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    """Convolution + optional batch norm + optional activation, as one building block."""

    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF-style padding the padding is applied dynamically in forward().
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # `use_activation` may name an activation directly; otherwise fall back
            # to the activation configured for the whole model.
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights of a single module."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            # The channel count doubles whenever the spatial resolution halves.
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 followed by pointwise 1x1 convolution.
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # MobileNetV1 has no attention heads to prune.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type from label dtype/shape if not explicitly set.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
307
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """
    Configuration class for a UniSpeech model. Stores the architecture hyper-parameters
    (feature encoder convolutions, transformer encoder, SpecAugment masking, vector
    quantization for pretraining, and CTC loss options). Instantiating with the defaults
    yields a configuration similar to microsoft/unispeech-large-1500h-cv.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three convolutional specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature encoder.
        return functools.reduce(operator.mul, self.conv_stride, 1)
307
1
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """A node of the segment tree covering the inclusive index range [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """
    Segment tree supporting point updates and range queries over an arbitrary
    associative combining function (e.g. `operator.add`, `max`, `min`).

    >>> import operator
    >>> num_arr = SegmentTree([2, 1, 5, 3, 4], operator.add)
    >>> num_arr.update(1, 5)
    >>> num_arr.query_range(3, 4)
    7
    >>> num_arr.query_range(2, 2)
    5
    >>> num_arr.query_range(1, 3)
    13
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set the element at index `i` to `val` and refresh all affected nodes."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return fn applied over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf node: a single element.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recombine the children after the point update.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val

        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield every node in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
307
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    """
    Adaptive softmax with optional embedding projections (Transformer-XL style).

    The vocabulary is split into a frequent "head" shortlist plus tail clusters
    delimited by `cutoffs`. The head softmax predicts shortlist tokens and one
    extra logit per cluster; tail tokens are scored by cluster-specific softmaxes
    whose embedding size can shrink by `div_val**i`.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    # No projection needed when dimensions already match.
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Project `hidden` (if `proj` is given) and apply the affine output map."""
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """
        Args:
            hidden: FloatTensor of shape [..., seq_len, d_proj].
            labels: optional LongTensor matching hidden's leading dims; when given,
                tokens are shifted so position t predicts token t+1 and the return
                value is the negative log-likelihood per predicted token.
            keep_order: keep output rows in input order instead of cluster order.

        Returns:
            Per-token NLL (when `labels` is given) or log-probabilities of shape
            [N, n_token] otherwise.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100  # ignore padding positions
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    # The head additionally predicts one logit per tail cluster.
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    # Index of the head logit assigned to this cluster.
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        """Return full-vocabulary log-probabilities of shape [N, n_token] for `hidden`."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    # FIX: use the same cluster logit index and broadcasting as the
                    # labels-None path of forward(); the previous `head_logprob[:, -i]`
                    # both picked the wrong cluster column and failed to broadcast
                    # against the [N, cluster_size] tail log-probabilities.
                    cluster_prob_idx = self.cutoffs[0] + i - 1
                    logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
307
1
# T5 model configuration module (collapsed onto one physical line by an automated
# rewrite).
# NOTE(review): assignment targets in this file were mangled to the repeated
# identifiers ``lowerCamelCase__``/``lowerCAmelCase__`` and parameters to ``a``;
# later references still use the original names (e.g. ``act_info``,
# ``common_inputs``), so the code as written raises SyntaxError/NameError.
# Original distinct names must be restored before this can run — TODO confirm
# against upstream.
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


# Module-level logger.
lowerCamelCase__ = logging.get_logger(__name__)

# Canonical T5 checkpoint names mapped to their hosted config.json URLs.
lowerCamelCase__ = {
    """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
    """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
    """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
    """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
    """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}


class A__ ( __magic_name__ ):
    """Configuration class for a T5-style encoder-decoder model.

    The attribute map translates T5-specific names (``d_model``, ``num_heads``,
    ``num_layers``) to the generic names other configs expose.
    """

    lowercase = 't5'
    lowercase = ['past_key_values']
    lowercase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__( self : Dict , a : Dict=32_128 , a : Tuple=512 , a : str=64 , a : List[str]=2_048 , a : Optional[Any]=6 , a : Tuple=None , a : str=8 , a : int=32 , a : Dict=128 , a : List[Any]=0.1 , a : List[Any]=1E-6 , a : Dict=1.0 , a : List[str]="relu" , a : int=True , a : List[Any]=True , a : str=0 , a : Optional[int]=1 , **a : Optional[Any] , ):
        """Store configuration values and validate ``feed_forward_proj``.

        NOTE(review): every parameter was mangled to ``a`` (duplicate parameter
        names are a SyntaxError) and every ``self.<attr> = ...`` target was
        mangled to ``lowerCAmelCase__`` — the right-hand sides reveal the
        intended attribute names (vocab_size, d_model, d_kv, ...).
        """
        lowerCAmelCase__ : Any = vocab_size
        lowerCAmelCase__ : int = d_model
        lowerCAmelCase__ : Optional[Any] = d_kv
        lowerCAmelCase__ : Dict = d_ff
        lowerCAmelCase__ : int = num_layers
        # Decoder depth defaults to the encoder depth (symmetric model).
        lowerCAmelCase__ : List[Any] = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        lowerCAmelCase__ : List[str] = num_heads
        lowerCAmelCase__ : Union[str, Any] = relative_attention_num_buckets
        lowerCAmelCase__ : int = relative_attention_max_distance
        lowerCAmelCase__ : List[str] = dropout_rate
        lowerCAmelCase__ : Dict = layer_norm_epsilon
        lowerCAmelCase__ : Optional[int] = initializer_factor
        lowerCAmelCase__ : Dict = feed_forward_proj
        lowerCAmelCase__ : Tuple = use_cache
        # Split "gated-gelu" style specs into (gating-flag, activation name).
        lowerCAmelCase__ : str = self.feed_forward_proj.split('-' )
        lowerCAmelCase__ : Union[str, Any] = act_info[-1]
        lowerCAmelCase__ : Any = act_info[0] == 'gated'
        # Reject anything that is not "<act>" or "gated-<act>".
        if len(a ) > 1 and act_info[0] != "gated" or len(a ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\''
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            lowerCAmelCase__ : Union[str, Any] = 'gelu_new'
        super().__init__(
            pad_token_id=a , eos_token_id=a , is_encoder_decoder=a , **a , )


class A__ ( __magic_name__ ):
    """ONNX export configuration for the seq2seq T5 model."""

    @property
    def _lowerCamelCase ( self : Optional[int] ):
        """Return the dynamic-axis mapping for the ONNX graph inputs.

        When ``use_past`` is set, decoder inputs only carry the new step and
        past key/values axes are registered via ``fill_with_past_key_values_``.
        """
        lowerCAmelCase__ : Any = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            lowerCAmelCase__ : Dict = 'past_encoder_sequence + sequence'
            lowerCAmelCase__ : int = {0: 'batch'}
            lowerCAmelCase__ : Union[str, Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            lowerCAmelCase__ : List[str] = {0: 'batch', 1: 'decoder_sequence'}
            lowerCAmelCase__ : Union[str, Any] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(a , direction='inputs' )
        return common_inputs

    @property
    def _lowerCamelCase ( self : str ):
        """Default ONNX opset version used for export."""
        return 13
307
# Nightly CI log-report script: scans pytest ``*.log`` report-log files in the
# working directory, tabulates failures, and posts a summary (plus one thread
# message per failing file) to Slack.
# NOTE(review): an automated rewrite collapsed every assignment target in this
# script to the single name ``lowerCamelCase__``; later references still use
# the original names (``failed``, ``group_info``, ``payload``, ``message``,
# ``client``, ``ts``, ...), so the script cannot run as written. The intended
# names must be restored from upstream before use.
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

# Markdown-pipe table format used for the Slack code blocks.
lowerCamelCase__ = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("""""", """|""", """|"""),
    datarow=DataRow("""""", """|""", """|"""),
    padding=1,
    with_header_hide=None,
)

lowerCamelCase__ = []
lowerCamelCase__ = []
# Slack block shown when no test failed.
lowerCamelCase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase__ = [
    {
        """type""": """header""",
        """text""": {
            """type""": """plain_text""",
            """text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
            """emoji""": True,
        },
    }
]
lowerCamelCase__ = 0
# Each *.log is a pytest report-log file: one JSON object per line.
for log in Path().glob("""*.log"""):
    lowerCamelCase__ = 0
    with open(log, """r""") as f:
        for line in f:
            lowerCamelCase__ = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                lowerCamelCase__ = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    lowerCamelCase__ = F"""{line["duration"]:.4f}"""
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    # Accumulate per-log failure info, then remove the consumed log file.
    group_info.append([str(log), section_num_failed, failed])
    lowerCamelCase__ = []
    log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += F"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += F"*{name[1:]}: {num_failed} failed tests*\n"
            lowerCamelCase__ = []
            lowerCamelCase__ = {}
            for test in failed_tests:
                # nodeid looks like "path/to/file.py::Class::test"; group by file.
                lowerCamelCase__ = test[0].split("""::""")
                lowerCamelCase__ = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    lowerCamelCase__ = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            lowerCamelCase__ = [test[0] for test in failed_table]
            lowerCamelCase__ = list(set(files))
            # Count number of instances in failed_tests
            lowerCamelCase__ = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            lowerCamelCase__ = tabulate(
                table,
                headers=["""Test Location""", """Num Failed"""],
                tablefmt=hf_table_format,
                stralign="""right""",
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    # Slack caps message size; truncate and point at the full Action report.
    if len(message) > 3000:
        lowerCamelCase__ = """Too many failed tests, please see the full report in the Action results."""
        lowerCamelCase__ = len(err) + 10
        lowerCamelCase__ = message[: 3000 - offset] + F"""\n...\n```\n{err}"""
    print(F"""### {message}""")
else:
    lowerCamelCase__ = """No failed tests! 🤗"""
    print(F"""## {message}""")
    payload.append(no_error_payload)
# Only talk to Slack when running inside CI (TEST_TYPE is set).
if os.environ.get("""TEST_TYPE""", """""") != "":
    from slack_sdk import WebClient

    lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        lowerCamelCase__ = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": message,
            },
        }
        payload.append(md_report)
        # Button linking back to the GitHub Actions run.
        lowerCamelCase__ = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": """*For more details:*""",
            },
            """accessory""": {
                """type""": """button""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """Check Action results""",
                    """emoji""": True,
                },
                """url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
            },
        }
        payload.append(action_button)
        lowerCamelCase__ = {
            """type""": """context""",
            """elements""": [
                {
                    """type""": """plain_text""",
                    """text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
                }
            ],
        }
        payload.append(date_report)
        lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        lowerCamelCase__ = response.data["""ts"""]
        # One threaded reply per failing test file.
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                lowerCamelCase__ = """"""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        lowerCamelCase__ = row[0]
                    else:
                        lowerCamelCase__ = """"""
                lowerCamelCase__ = {
                    """type""": """section""",
                    """text""": {
                        """type""": """mrkdwn""",
                        """text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
                    },
                }
                client.chat_postMessage(
                    channel="""#accelerate-ci-daily""",
                    thread_ts=ts,
                    blocks=[payload],
                )
307
1
# Unit tests for the DanceDiffusion (unconditional audio generation) pipeline.
# NOTE(review): assignment targets were mangled to ``lowerCAmelCase__`` and most
# arguments to ``a`` by an automated rewrite; later references (``unet``,
# ``components``, ``pipe``, ``audio_slice``, ...) reveal the intended names.
# The code cannot run as written — restore names from upstream first.
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class A__ ( __magic_name__ , unittest.TestCase ):
    """Fast (CPU) tests for DanceDiffusionPipeline via the common tester mixin."""

    lowercase = DanceDiffusionPipeline
    lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    # Audio pipelines do not expose image-style optional params.
    lowercase = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    lowercase = False
    lowercase = False

    def _lowerCamelCase ( self : List[str] ):
        """Build tiny deterministic pipeline components (1D UNet + IPNDM scheduler)."""
        torch.manual_seed(0 )
        lowerCAmelCase__ : Optional[int] = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
        lowerCAmelCase__ : Tuple = IPNDMScheduler()
        lowerCAmelCase__ : str = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
        """Return seeded pipeline call kwargs; MPS needs a CPU-side generator."""
        if str(a ).startswith('mps' ):
            lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
        else:
            lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
        lowerCAmelCase__ : Optional[Any] = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs

    def _lowerCamelCase ( self : Optional[Any] ):
        """Smoke test: run 4 steps on CPU and pin a slice of the output audio."""
        lowerCAmelCase__ : Optional[Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : int = self.get_dummy_components()
        lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
        lowerCAmelCase__ : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
        lowerCAmelCase__ : List[Any] = pipe(**a )
        lowerCAmelCase__ : List[str] = output.audios
        lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    # The mixin tests below are unreliable on Apple MPS; delegate and skip there.
    @skip_mps
    def _lowerCamelCase ( self : str ):
        """Delegate save/load-local test (skipped on MPS)."""
        return super().test_save_load_local()

    @skip_mps
    def _lowerCamelCase ( self : Tuple ):
        """Delegate dict/tuple output-equivalence test (skipped on MPS)."""
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    @skip_mps
    def _lowerCamelCase ( self : List[str] ):
        """Delegate optional-components save/load test (skipped on MPS)."""
        return super().test_save_load_optional_components()

    @skip_mps
    def _lowerCamelCase ( self : Tuple ):
        """Delegate attention-slicing forward test (skipped on MPS)."""
        return super().test_attention_slicing_forward_pass()

    def _lowerCamelCase ( self : List[str] ):
        """Delegate batched-vs-single equivalence test with a loose tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration tests against the real harmonai/maestro-150k weights."""

    def _lowerCamelCase ( self : Dict ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase ( self : Union[str, Any] ):
        """Full-precision generation: pin shape and an output-audio slice."""
        lowerCAmelCase__ : Tuple = torch_device
        lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
        lowerCAmelCase__ : List[str] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
        lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        lowerCAmelCase__ : int = output.audios
        lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    def _lowerCamelCase ( self : Tuple ):
        """Half-precision generation: same checks with fp16 reference values."""
        lowerCAmelCase__ : str = torch_device
        lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
        lowerCAmelCase__ : Optional[int] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : str = torch.manual_seed(0 )
        lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        lowerCAmelCase__ : str = output.audios
        lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
307
# Pytest suite for the digital_image_processing package, exercising each filter
# against the bundled Lena test images.
# NOTE(review): assignment targets were mangled to ``lowerCAmelCase__``/
# ``lowerCamelCase__`` and arguments to ``SCREAMING_SNAKE_CASE_`` by an
# automated rewrite; references (``img``, ``gray``, ``negative_img``, ...)
# reveal the intended names. Cannot run as written.
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: BGR test image and its grayscale conversion.
lowerCamelCase__ = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)


def lowerCAmelCase__ ( ) -> Dict:
    """Negative conversion should produce at least one nonzero pixel."""
    lowerCAmelCase__ : List[Any] = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
    # assert negative_img array for at least one True
    assert negative_img.any()


def lowerCAmelCase__ ( ) -> Optional[Any]:
    """change_contrast returns a PIL image; check its repr as a smoke test."""
    with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at' )


def lowerCAmelCase__ ( ) -> Tuple:
    """Gaussian kernel generation should yield an all-nonzero kernel."""
    lowerCAmelCase__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()


def lowerCAmelCase__ ( ) -> Tuple:
    """Canny edge detection should mark at least one edge pixel."""
    lowerCAmelCase__ : Tuple = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    lowerCAmelCase__ : Optional[Any] = canny.canny(SCREAMING_SNAKE_CASE_ )
    # assert canny array for at least one True
    assert canny_array.any()


def lowerCAmelCase__ ( ) -> Optional[int]:
    """Gaussian filtering should leave no zero pixels for this input."""
    assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()


def lowerCAmelCase__ ( ) -> Dict:
    # laplace diagonals
    lowerCAmelCase__ : Union[str, Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    lowerCAmelCase__ : int = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
    assert res.any()


def lowerCAmelCase__ ( ) -> List[str]:
    """Median filter smoke test."""
    assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()


def lowerCAmelCase__ ( ) -> Any:
    """Sobel filter returns gradient magnitude and direction arrays."""
    lowerCAmelCase__ , lowerCAmelCase__ : str = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
    assert grad.any() and theta.any()


def lowerCAmelCase__ ( ) -> Any:
    """Sepia conversion smoke test."""
    lowerCAmelCase__ : int = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
    assert sepia.all()


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
    """Burkes dithering smoke test."""
    lowerCAmelCase__ : List[Any] = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Any:
    """Nearest-neighbour resize smoke test."""
    lowerCAmelCase__ : Dict = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()


def lowerCAmelCase__ ( ) -> int:
    """Local binary pattern: check neighbour lookup and per-pixel LBP values."""
    lowerCAmelCase__ : int = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    lowerCAmelCase__ : List[str] = imread(SCREAMING_SNAKE_CASE_ , 0 )
    # Test for get_neighbors_pixel function() return not None
    lowerCAmelCase__ : str = 0
    lowerCAmelCase__ : str = 0
    lowerCAmelCase__ : List[str] = image[x_coordinate][y_coordinate]
    lowerCAmelCase__ : Dict = lbp.get_neighbors_pixel(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lowerCAmelCase__ : List[str] = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lowerCAmelCase__ : Dict = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    assert lbp_image.any()
307
1
def lowerCAmelCase__(string_a: str, string_b: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters of the two strings differ.

    :param string_a: first string
    :param string_b: second string; must have the same length as ``string_a``
    :return: the number of differing character positions
    :raises ValueError: if the two strings have different lengths

    >>> lowerCAmelCase__("karolin", "kathrin")
    3
    >>> lowerCAmelCase__("", "")
    0
    """
    if len(string_a) != len(string_b):
        raise ValueError('String lengths must match!')
    # The original code bound BOTH zip targets (and both parameters) to one
    # mangled name, so the comparison could never detect a mismatch. Using
    # distinct names restores the intended per-position comparison; sum()
    # counts the True (differing) positions.
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
307
# Lazy-import ``__init__`` for the BLIP model family: the import structure maps
# submodule names to exported symbols, with vision / torch / TF exports added
# only when the corresponding backend is available.
# NOTE(review): every ``_import_structure`` assignment target was mangled to
# ``lowerCamelCase__`` by an automated rewrite — the final ``_LazyModule`` call
# still references ``_import_structure``, which is therefore undefined here.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Base (always-importable) structure: configs and the processor.
lowerCamelCase__ = {
    """configuration_blip""": [
        """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BlipConfig""",
        """BlipTextConfig""",
        """BlipVisionConfig""",
    ],
    """processing_blip""": ["""BlipProcessor"""],
}

# Image processor requires the vision backend.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = ["""BlipImageProcessor"""]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BlipModel""",
        """BlipPreTrainedModel""",
        """BlipForConditionalGeneration""",
        """BlipForQuestionAnswering""",
        """BlipVisionModel""",
        """BlipTextModel""",
        """BlipForImageTextRetrieval""",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase__ = [
        """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFBlipModel""",
        """TFBlipPreTrainedModel""",
        """TFBlipForConditionalGeneration""",
        """TFBlipForQuestionAnswering""",
        """TFBlipVisionModel""",
        """TFBlipTextModel""",
        """TFBlipForImageTextRetrieval""",
    ]

# Under static type checking, import everything eagerly instead.
if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    # At runtime, replace this module with a lazily-importing proxy.
    import sys

    lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
307
1
# Tokenization tests for RoFormer (Chinese): checks that the slow and fast
# tokenizers produce the same rjieba-based word segmentation and ids.
# NOTE(review): assignment targets were mangled to ``lowerCAmelCase__`` by an
# automated rewrite; references (``tokens``, ``input_tokens``) reveal the
# intended names. Cannot run as written.
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
    lowercase = RoFormerTokenizer
    lowercase = RoFormerTokenizerFast
    lowercase = True
    lowercase = True

    def _lowerCamelCase ( self : List[Any] ):
        """Standard mixin setup; no extra fixtures needed."""
        super().setUp()

    def _lowerCamelCase ( self : Dict , **a : int ):
        """Instantiate the slow tokenizer from the reference checkpoint."""
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a )

    def _lowerCamelCase ( self : Dict , **a : Union[str, Any] ):
        """Instantiate the fast tokenizer from the reference checkpoint."""
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **a )

    def _lowerCamelCase ( self : Any ):
        """Return a (raw text, expected segmentation) Chinese test pair."""
        lowerCAmelCase__ : Optional[Any] = '永和服装饰品有限公司,今天天气非常好'
        lowerCAmelCase__ : Union[str, Any] = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text

    def _lowerCamelCase ( self : Tuple ):
        """Slow tokenizer: segmentation and token-to-id mapping match reference."""
        lowerCAmelCase__ : Tuple = self.get_tokenizer()
        lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.get_chinese_input_output_texts()
        lowerCAmelCase__ : Union[str, Any] = tokenizer.tokenize(a )
        self.assertListEqual(a , output_text.split() )
        lowerCAmelCase__ : Union[str, Any] = tokens + [tokenizer.unk_token]
        lowerCAmelCase__ : Optional[int] = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )

    def _lowerCamelCase ( self : Union[str, Any] ):
        """Fast tokenizer: same checks as the slow-tokenizer test above."""
        lowerCAmelCase__ : Dict = self.get_rust_tokenizer()
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_chinese_input_output_texts()
        lowerCAmelCase__ : Tuple = tokenizer.tokenize(a )
        self.assertListEqual(a , output_text.split() )
        lowerCAmelCase__ : List[Any] = tokens + [tokenizer.unk_token]
        lowerCAmelCase__ : Dict = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )

    # The generic mixin tests below do not apply to this tokenizer; overridden
    # as no-ops.
    def _lowerCamelCase ( self : Tuple ):
        """Intentionally skipped mixin test."""
        pass

    def _lowerCamelCase ( self : List[str] ):
        """Intentionally skipped mixin test."""
        pass

    def _lowerCamelCase ( self : Union[str, Any] ):
        """Intentionally skipped mixin test."""
        pass
307
# Heun-style (2nd order) Karras scheduler module for diffusers.
# NOTE(review): assignment targets throughout were mangled to
# ``lowerCAmelCase__`` and parameters to ``a``/``SCREAMING_SNAKE_CASE_`` by an
# automated rewrite; the surviving references (``betas``, ``sigmas``,
# ``step_index``, ``pred_original_sample``, ...) reveal the intended names.
# The module cannot run as written — restore names from upstream first.
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.999 , SCREAMING_SNAKE_CASE_="cosine" , ) -> Union[str, Any]:
    """Build a beta schedule from a cumulative alpha-bar function.

    Supports "cosine" (squared-cosine) and "exp" transforms; betas are capped
    at ``max_beta`` to keep the schedule numerically stable.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
    lowerCAmelCase__ : Tuple = []
    for i in range(SCREAMING_SNAKE_CASE_ ):
        lowerCAmelCase__ : List[Any] = i / num_diffusion_timesteps
        lowerCAmelCase__ : str = (i + 1) / num_diffusion_timesteps
        # beta_t = 1 - alpha_bar(t+1)/alpha_bar(t), clipped to max_beta.
        betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
    return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )


class A__ ( __magic_name__ , __magic_name__ ):
    """Second-order (Heun) scheduler with optional Karras sigma spacing."""

    lowercase = [e.name for e in KarrasDiffusionSchedulers]
    # Heun takes two model evaluations per output step.
    lowercase = 2

    @register_to_config
    def __init__( self : Union[str, Any] , a : int = 1_000 , a : float = 0.0_0_0_8_5 , a : float = 0.0_1_2 , a : str = "linear" , a : Optional[Union[np.ndarray, List[float]]] = None , a : str = "epsilon" , a : Optional[bool] = False , a : Optional[bool] = False , a : float = 1.0 , a : str = "linspace" , a : int = 0 , ):
        """Build the beta/alpha tables for the chosen schedule and set timesteps."""
        if trained_betas is not None:
            lowerCAmelCase__ : List[str] = torch.tensor(a , dtype=torch.floataa )
        elif beta_schedule == "linear":
            lowerCAmelCase__ : List[str] = torch.linspace(a , a , a , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            lowerCAmelCase__ : Union[str, Any] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , a , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            lowerCAmelCase__ : int = betas_for_alpha_bar(a , alpha_transform_type='cosine' )
        elif beta_schedule == "exp":
            lowerCAmelCase__ : List[str] = betas_for_alpha_bar(a , alpha_transform_type='exp' )
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
        lowerCAmelCase__ : int = 1.0 - self.betas
        lowerCAmelCase__ : Tuple = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(a , a , a )
        lowerCAmelCase__ : Optional[Any] = use_karras_sigmas

    def _lowerCamelCase ( self : str , a : List[Any] , a : str=None ):
        """Map a timestep value back to its index in the (duplicated) schedule."""
        if schedule_timesteps is None:
            lowerCAmelCase__ : List[str] = self.timesteps
        lowerCAmelCase__ : int = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            lowerCAmelCase__ : List[str] = 1 if len(a ) > 1 else 0
        else:
            lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
            lowerCAmelCase__ : Tuple = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def _lowerCamelCase ( self : Dict ):
        """Initial noise scale used to scale the starting latents."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Union[float, torch.FloatTensor] , ):
        """Scale model input to match the K-diffusion algorithm's expectation."""
        lowerCAmelCase__ : Tuple = self.index_for_timestep(a )
        lowerCAmelCase__ : Any = self.sigmas[step_index]
        lowerCAmelCase__ : Optional[Any] = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def _lowerCamelCase ( self : List[str] , a : int , a : Union[str, torch.device] = None , a : Optional[int] = None , ):
        """Precompute sigmas/timesteps (each interior value duplicated for Heun)."""
        lowerCAmelCase__ : Any = num_inference_steps
        lowerCAmelCase__ : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            lowerCAmelCase__ : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , a , dtype=a )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            lowerCAmelCase__ : List[Any] = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCAmelCase__ : Dict = (np.arange(0 , a ) * step_ratio).round()[::-1].copy().astype(a )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            lowerCAmelCase__ : Tuple = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCAmelCase__ : int = (np.arange(a , 0 , -step_ratio )).round().copy().astype(a )
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
        lowerCAmelCase__ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        lowerCAmelCase__ : List[Any] = np.log(a )
        lowerCAmelCase__ : Optional[int] = np.interp(a , np.arange(0 , len(a ) ) , a )
        if self.config.use_karras_sigmas:
            lowerCAmelCase__ : str = self._convert_to_karras(in_sigmas=a , num_inference_steps=self.num_inference_steps )
            lowerCAmelCase__ : Union[str, Any] = np.array([self._sigma_to_t(a , a ) for sigma in sigmas] )
        lowerCAmelCase__ : Tuple = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        lowerCAmelCase__ : Dict = torch.from_numpy(a ).to(device=a )
        # Duplicate interior sigmas/timesteps: Heun evaluates each step twice.
        lowerCAmelCase__ : Union[str, Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        lowerCAmelCase__ : Tuple = torch.from_numpy(a )
        lowerCAmelCase__ : List[str] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(a ).startswith('mps' ):
            # mps does not support float64
            lowerCAmelCase__ : Optional[Any] = timesteps.to(a , dtype=torch.floataa )
        else:
            lowerCAmelCase__ : Any = timesteps.to(device=a )
        # empty dt and derivative
        lowerCAmelCase__ : str = None
        lowerCAmelCase__ : Optional[int] = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        lowerCAmelCase__ : Optional[Any] = defaultdict(a )

    def _lowerCamelCase ( self : Any , a : Dict , a : Optional[Any] ):
        """Invert the sigma schedule: interpolate a sigma back to a (fractional) t."""
        lowerCAmelCase__ : Optional[Any] = np.log(a )
        # get distribution
        lowerCAmelCase__ : Tuple = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        lowerCAmelCase__ : Optional[int] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        lowerCAmelCase__ : List[str] = low_idx + 1
        lowerCAmelCase__ : List[str] = log_sigmas[low_idx]
        lowerCAmelCase__ : Any = log_sigmas[high_idx]
        # interpolate sigmas
        lowerCAmelCase__ : Union[str, Any] = (low - log_sigma) / (low - high)
        lowerCAmelCase__ : List[Any] = np.clip(a , 0 , 1 )
        # transform interpolation to time range
        lowerCAmelCase__ : List[Any] = (1 - w) * low_idx + w * high_idx
        lowerCAmelCase__ : Any = t.reshape(sigma.shape )
        return t

    def _lowerCamelCase ( self : Tuple , a : torch.FloatTensor , a : Any ):
        """Construct the Karras et al. (2022) noise schedule (rho = 7)."""
        lowerCAmelCase__ : float = in_sigmas[-1].item()
        lowerCAmelCase__ : float = in_sigmas[0].item()
        lowerCAmelCase__ : Tuple = 7.0  # 7.0 is the value used in the paper
        lowerCAmelCase__ : Tuple = np.linspace(0 , 1 , a )
        lowerCAmelCase__ : Any = sigma_min ** (1 / rho)
        lowerCAmelCase__ : Optional[Any] = sigma_max ** (1 / rho)
        lowerCAmelCase__ : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def _lowerCamelCase ( self : Any ):
        """True while waiting for the first (Euler) half of a Heun step."""
        return self.dt is None

    def _lowerCamelCase ( self : List[str] , a : Union[torch.FloatTensor, np.ndarray] , a : Union[float, torch.FloatTensor] , a : Union[torch.FloatTensor, np.ndarray] , a : bool = True , ):
        """One scheduler step: Euler predictor, then Heun (trapezoidal) corrector."""
        lowerCAmelCase__ : List[str] = self.index_for_timestep(a )
        # advance index counter by 1
        lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(a ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index]
            lowerCAmelCase__ : Union[str, Any] = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            lowerCAmelCase__ : int = self.sigmas[step_index - 1]
            lowerCAmelCase__ : Any = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        lowerCAmelCase__ : Optional[int] = 0
        lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            lowerCAmelCase__ : int = sigma_hat if self.state_in_first_order else sigma_next
            lowerCAmelCase__ : Any = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_next
            lowerCAmelCase__ : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            lowerCAmelCase__ : int = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
        if self.config.clip_sample:
            lowerCAmelCase__ : str = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            lowerCAmelCase__ : Dict = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            lowerCAmelCase__ : Optional[int] = sigma_next - sigma_hat
            # store for 2nd order step
            lowerCAmelCase__ : List[Any] = derivative
            lowerCAmelCase__ : str = dt
            lowerCAmelCase__ : Dict = sample
        else:
            # 2. 2nd order / Heun's method
            lowerCAmelCase__ : Union[str, Any] = (sample - pred_original_sample) / sigma_next
            lowerCAmelCase__ : Union[str, Any] = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            lowerCAmelCase__ : Dict = self.dt
            lowerCAmelCase__ : Optional[int] = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            lowerCAmelCase__ : List[str] = None
            lowerCAmelCase__ : Tuple = None
            lowerCAmelCase__ : str = None
        lowerCAmelCase__ : Tuple = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=a )

    def _lowerCamelCase ( self : int , a : torch.FloatTensor , a : torch.FloatTensor , a : torch.FloatTensor , ):
        """Add noise at the sigma level matching each sample's timestep."""
        lowerCAmelCase__ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(a ):
            # mps does not support float64
            lowerCAmelCase__ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            lowerCAmelCase__ : int = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device )
            lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device )
        lowerCAmelCase__ : List[Any] = [self.index_for_timestep(a , a ) for t in timesteps]
        lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten()
        # Broadcast sigma over the trailing sample dimensions.
        while len(sigma.shape ) < len(original_samples.shape ):
            lowerCAmelCase__ : Any = sigma.unsqueeze(-1 )
        lowerCAmelCase__ : List[str] = original_samples + noise * sigma
        return noisy_samples

    def __len__( self : int ):
        """Number of training timesteps in the configured schedule."""
        return self.config.num_train_timesteps
1
"""Convert Swin + UperNet checkpoints from mmsegmentation to the Hugging Face format.

Downloads an ADE20k-trained mmsegmentation checkpoint, remaps its state dict onto
``UperNetForSemanticSegmentation`` (Swin backbone), verifies a logit slice on a
fixture image, and optionally saves/pushes the converted model.
"""

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    """Build a ``UperNetConfig`` (with a Swin backbone) matching *model_name*.

    Args:
        model_name: one of ``upernet-swin-{tiny,small,base,large}``.

    Returns:
        A fully populated ``UperNetConfig`` with ADE20k label mappings.
    """
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k: 150 semantic classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def create_rename_keys(config):
    """Return ``(old_key, new_key)`` pairs mapping mmseg parameter names to HF names."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        # the last stage has no downsample layer
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused mmseg qkv projection into separate query/key/value tensors.

    Mutates *state_dict* in place: pops ``...attn.w_msa.qkv.{weight,bias}`` and adds
    ``...attention.self.{query,key,value}.{weight,bias}`` entries.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    """Reorder a (out, in) patch-merging reduction weight from mmseg to HF layout."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    """Inverse of :func:`correct_unfold_reduction_order`."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    """Reorder a 1-D patch-merging norm parameter from mmseg to HF layout."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    """Inverse of :func:`correct_unfold_norm_order`."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download, convert, verify and (optionally) save/push one UperNet-Swin checkpoint.

    Args:
        model_name: checkpoint identifier, e.g. ``"upernet-swin-tiny"``.
        pytorch_dump_folder_path: directory to write the converted model to, or None.
        push_to_hub: whether to push model + processor to the ``openmmlab`` org.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
307
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]: if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ): return x return (x, x) @require_tf class A__ : def _lowerCamelCase ( self : List[Any] , a : List[str] , a : Optional[Any] ): '''simple docstring''' pass def _lowerCamelCase ( self : Union[str, Any] ): '''simple docstring''' pass def _lowerCamelCase ( self : Dict ): '''simple docstring''' pass def _lowerCamelCase ( self : Dict , a : int , a : str , a : List[Any] , a : Dict , a : List[str]=None , **a : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(a , a ) lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel(a ) lowerCAmelCase__ : Tuple = model(input_ids=a , pixel_values=a , attention_mask=a ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def _lowerCamelCase ( self : Union[str, Any] , a : Dict , a : Tuple , a : 
Dict , a : Union[str, Any] , a : List[Any]=None , **a : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_vision_text_model(a , a ) lowerCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : Optional[int] = model(input_ids=a , pixel_values=a , attention_mask=a ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _lowerCamelCase ( self : List[str] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : List[Any] , a : Any=None , **a : Dict ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Optional[Any] = {'vision_model': vision_model, 'text_model': text_model} lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**a ) lowerCAmelCase__ : Union[str, Any] = model(input_ids=a , pixel_values=a , attention_mask=a ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _lowerCamelCase ( self : Any , a : Optional[int] , a : Optional[int] , a : Dict , a : Optional[int] , a : Optional[int]=None , **a : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : int = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : List[str] = model(input_ids=a , pixel_values=a , attention_mask=a ) lowerCAmelCase__ : Union[str, Any] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a ) lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel.from_pretrained(a ) lowerCAmelCase__ : int = model(input_ids=a , pixel_values=a , 
attention_mask=a ) lowerCAmelCase__ : Union[str, Any] = after_output[0].numpy() lowerCAmelCase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(a , 1E-5 ) def _lowerCamelCase ( self : List[str] , a : Dict , a : Optional[int] , a : List[Any] , a : str , a : int=None , **a : Tuple ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : str = model( input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a ) lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions self.assertEqual(len(a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ : Optional[int] = to_atuple(vision_model.config.image_size ) lowerCAmelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size ) lowerCAmelCase__ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase__ : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase__ : str = output.text_model_output.attentions self.assertEqual(len(a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCamelCase ( self : List[Any] , a : np.ndarray , a : np.ndarray , a : float ): '''simple docstring''' lowerCAmelCase__ : int = np.abs((a - b) ).max() self.assertLessEqual(a , a , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : Dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**a ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : Any = 
self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**a ) def _lowerCamelCase ( self : str ): '''simple docstring''' lowerCAmelCase__ : str = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**a ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() self.check_save_load(**a ) def _lowerCamelCase ( self : Dict ): '''simple docstring''' lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**a ) @slow def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.get_pretrained_model_and_inputs() lowerCAmelCase__ : List[Any] = model_a(**a ) lowerCAmelCase__ : Optional[int] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(a ) lowerCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(a ) lowerCAmelCase__ : List[str] = model_a(**a ) lowerCAmelCase__ : int = after_outputs[0].numpy() lowerCAmelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(a , 1E-5 ) @require_tf class A__ ( __magic_name__ , unittest.TestCase ): def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' ) lowerCAmelCase__ : int = 13 lowerCAmelCase__ : List[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : List[Any] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, 
inputs def _lowerCamelCase ( self : List[Any] , a : Dict , a : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = TFViTModel(a , name='vision_model' ) lowerCAmelCase__ : str = TFBertModel(a , name='text_model' ) return vision_model, text_model def _lowerCamelCase ( self : List[Any] ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = TFViTModelTester(self ) lowerCAmelCase__ : Tuple = TFBertModelTester(self ) lowerCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = vision_config_and_inputs ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( __magic_name__ , unittest.TestCase ): def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' ) lowerCAmelCase__ : Tuple = 13 lowerCAmelCase__ : Any = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Any = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def 
_lowerCamelCase ( self : str , a : Optional[Any] , a : Dict , a : Dict , a : Any , a : Any=None , **a : int ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_vision_text_model(a , a ) lowerCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=a , text_model=a ) lowerCAmelCase__ : Any = model( input_ids=a , pixel_values=a , attention_mask=a , output_attentions=a ) lowerCAmelCase__ : Union[str, Any] = output.vision_model_output.attentions self.assertEqual(len(a ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowerCAmelCase__ : str = to_atuple(vision_model.config.image_size ) lowerCAmelCase__ : Union[str, Any] = to_atuple(vision_model.config.patch_size ) lowerCAmelCase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase__ : int = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase__ : List[str] = output.text_model_output.attentions self.assertEqual(len(a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _lowerCamelCase ( self : int , a : Optional[int] , a : int ): '''simple docstring''' lowerCAmelCase__ : Dict = TFDeiTModel(a , name='vision_model' ) lowerCAmelCase__ : List[Any] = TFRobertaModel(a , name='text_model' ) return vision_model, text_model def _lowerCamelCase ( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : Dict = TFDeiTModelTester(self ) lowerCAmelCase__ : List[str] = TFRobertaModelTester(self ) lowerCAmelCase__ : str = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = vision_config_and_inputs ( ( lowerCAmelCase__ ) , 
( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( __magic_name__ , unittest.TestCase ): def _lowerCamelCase ( self : List[str] ): '''simple docstring''' lowerCAmelCase__ : int = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' ) lowerCAmelCase__ : Dict = 13 lowerCAmelCase__ : str = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase__ : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase__ : Union[str, Any] = random_attention_mask([batch_size, 4] ) lowerCAmelCase__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def _lowerCamelCase ( self : str , a : int , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = TFCLIPVisionModel(a , name='vision_model' ) lowerCAmelCase__ : List[str] = TFBertModel(a , name='text_model' ) return vision_model, text_model def _lowerCamelCase ( self : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Any = TFCLIPVisionModelTester(self ) lowerCAmelCase__ : Union[str, Any] = TFBertModelTester(self ) lowerCAmelCase__ : Any = clip_model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Any = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = vision_config_and_inputs ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ 
) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : str = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class A__ ( unittest.TestCase ): @slow def _lowerCamelCase ( self : int ): '''simple docstring''' lowerCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained( 'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=a ) lowerCAmelCase__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) lowerCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ : Any = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=a , padding=a , return_tensors='np' ) lowerCAmelCase__ : Union[str, Any] = model(**a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowerCAmelCase__ : List[str] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , a , atol=1E-3 ) )
307
1