Dataset columns:

column                    dtype             min    max
code                      string (lengths)  81     54k
code_codestyle            int64             0      721
style_context             string (lengths)  91     41.9k
style_context_codestyle   int64             0      699
label                     int64             0      1
'''simple docstring''' import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __UpperCamelCase ( a : Optional[Any] ) ->str: snake_case = torch.exp(a ) snake_case = torch.sum(a , dim=1 ) # sum of exp(x_i) snake_case = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(a ) - B / A class _lowercase ( nn.Module ): def __init__( self , A__ ) -> Any: super().__init__() snake_case = config.output_attentions snake_case = config.output_hidden_states snake_case = nn.ModuleList([BertLayer(A__ ) for _ in range(config.num_hidden_layers )] ) snake_case = nn.ModuleList([BertHighway(A__ ) for _ in range(config.num_hidden_layers )] ) snake_case = [-1 for _ in range(config.num_hidden_layers )] def UpperCamelCase ( self , A__ ) -> Union[str, Any]: if (type(A__ ) is float) or (type(A__ ) is int): for i in range(len(self.early_exit_entropy ) ): snake_case = x else: snake_case = x def UpperCamelCase ( self , A__ ) -> Optional[Any]: snake_case = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def UpperCamelCase ( self , A__ , A__=None , A__=None , A__=None , A__=None , ) -> int: snake_case = () snake_case = () snake_case = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: snake_case = all_hidden_states + (hidden_states,) snake_case = layer_module( A__ , A__ , head_mask[i] , A__ , A__ ) snake_case = layer_outputs[0] if self.output_attentions: snake_case = all_attentions + (layer_outputs[1],) snake_case = (hidden_states,) if self.output_hidden_states: snake_case = current_outputs + (all_hidden_states,) if self.output_attentions: snake_case = current_outputs + (all_attentions,) snake_case = self.highway[i](A__ ) # logits, pooled_output if not self.training: snake_case = highway_exit[0] snake_case = entropy(A__ ) snake_case = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy snake_case = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: snake_case = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(A__ , i + 1 ) else: snake_case = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: snake_case = all_hidden_states + (hidden_states,) snake_case = (hidden_states,) if self.output_hidden_states: snake_case = outputs + (all_hidden_states,) if self.output_attentions: snake_case = outputs + (all_attentions,) snake_case = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( '''The Bert Model transformer with early exiting (DeeBERT). 
''' , __a , ) class _lowercase ( __a ): def __init__( self , A__ ) -> str: super().__init__(A__ ) snake_case = config snake_case = BertEmbeddings(A__ ) snake_case = DeeBertEncoder(A__ ) snake_case = BertPooler(A__ ) self.init_weights() def UpperCamelCase ( self ) -> Dict: self.encoder.init_highway_pooler(self.pooler ) def UpperCamelCase ( self ) -> str: return self.embeddings.word_embeddings def UpperCamelCase ( self , A__ ) -> Any: snake_case = value def UpperCamelCase ( self , A__ ) -> Union[str, Any]: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(A__ ) @add_start_docstrings_to_model_forward(A__ ) def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> str: if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: snake_case = input_ids.size() elif inputs_embeds is not None: snake_case = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) snake_case = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: snake_case = torch.ones(A__ , device=A__ ) if encoder_attention_mask is None: snake_case = torch.ones(A__ , device=A__ ) if token_type_ids is None: snake_case = torch.zeros(A__ , dtype=torch.long , device=A__ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. snake_case = self.get_extended_attention_mask(A__ , A__ , A__ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: snake_case = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: snake_case = encoder_attention_mask[:, None, None, :] snake_case = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility snake_case = (1.0 - encoder_extended_attention_mask) * -1_00_00.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] snake_case = self.get_head_mask(A__ , self.config.num_hidden_layers ) snake_case = self.embeddings( input_ids=A__ , position_ids=A__ , token_type_ids=A__ , inputs_embeds=A__ ) snake_case = self.encoder( A__ , attention_mask=A__ , head_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , ) snake_case = encoder_outputs[0] snake_case = self.pooler(A__ ) snake_case = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class _lowercase ( __a ): def __init__( self , A__ , A__ ) -> Any: snake_case = message snake_case = exit_layer # start from 1! 
class _lowercase ( nn.Module ): def __init__( self , A__ ) -> str: super().__init__() snake_case = BertPooler(A__ ) snake_case = nn.Dropout(config.hidden_dropout_prob ) snake_case = nn.Linear(config.hidden_size , config.num_labels ) def UpperCamelCase ( self , A__ ) -> Optional[Any]: # Pooler snake_case = encoder_outputs[0] snake_case = self.pooler(A__ ) # "return" pooler_output # BertModel snake_case = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification snake_case = bmodel_output[1] snake_case = self.dropout(A__ ) snake_case = self.classifier(A__ ) return logits, pooled_output @add_start_docstrings( '''Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. ''' , __a , ) class _lowercase ( __a ): def __init__( self , A__ ) -> Union[str, Any]: super().__init__(A__ ) snake_case = config.num_labels snake_case = config.num_hidden_layers snake_case = DeeBertModel(A__ ) snake_case = nn.Dropout(config.hidden_dropout_prob ) snake_case = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(A__ ) def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=-1 , A__=False , ) -> Tuple: snake_case = self.num_layers try: snake_case = self.bert( A__ , attention_mask=A__ , token_type_ids=A__ , position_ids=A__ , head_mask=A__ , inputs_embeds=A__ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits snake_case = outputs[1] snake_case = self.dropout(A__ ) snake_case = self.classifier(A__ ) snake_case = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: snake_case = e.message snake_case = e.exit_layer snake_case = outputs[0] if not self.training: snake_case = entropy(A__ ) snake_case = [] snake_case = [] if labels is not None: if self.num_labels == 1: # We are doing regression snake_case = MSELoss() snake_case = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: snake_case = CrossEntropyLoss() snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits snake_case = [] for highway_exit in outputs[-1]: snake_case = highway_exit[0] if not self.training: highway_logits_all.append(A__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression snake_case = MSELoss() snake_case = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: snake_case = CrossEntropyLoss() snake_case = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(A__ ) if train_highway: snake_case = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: snake_case = (loss,) + outputs if not self.training: snake_case = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: snake_case = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
711
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
44
0
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input

_lowercase = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def __UpperCamelCase ( ) ->Dict:
    snake_case = _ask_options(
        '''In which compute environment are you running?''' ,
        ['''This machine''', '''AWS (Amazon SageMaker)'''] ,
        _convert_compute_environment ,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        snake_case = get_sagemaker_input()
    else:
        snake_case = get_cluster_input()
    return config


def __UpperCamelCase ( a : List[str]=None ) ->int:
    if subparsers is not None:
        snake_case = subparsers.add_parser('''config''' , description=a )
    else:
        snake_case = argparse.ArgumentParser('''Accelerate config command''' , description=a )
    parser.add_argument(
        '''--config_file''' ,
        default=a ,
        help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) ,
    )
    if subparsers is not None:
        parser.set_defaults(func=a )
    return parser


def __UpperCamelCase ( a : Union[str, Any] ) ->Optional[int]:
    snake_case = get_user_input()
    if args.config_file is not None:
        snake_case = args.config_file
    else:
        if not os.path.isdir(a ):
            os.makedirs(a )
        snake_case = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(a )
    else:
        config.to_yaml_file(a )
    print(f"""accelerate configuration saved at {config_file}""" )


def __UpperCamelCase ( ) ->Dict:
    snake_case = config_command_parser()
    snake_case = parser.parse_args()
    config_command(a )


if __name__ == "__main__":
    main()
712
'''simple docstring'''
from ...processing_utils import ProcessorMixin


class _lowercase ( __a ):
    _UpperCAmelCase = '''WhisperFeatureExtractor'''
    _UpperCAmelCase = '''WhisperTokenizer'''

    def __init__( self , A__ , A__ ) -> Optional[Any]:
        super().__init__(A__ , A__ )
        snake_case = self.feature_extractor
        snake_case = False

    def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]:
        return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ )

    def __call__( self , *A__ , **A__ ) -> Dict:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*A__ , **A__ )
        snake_case = kwargs.pop('''audio''' , A__ )
        snake_case = kwargs.pop('''sampling_rate''' , A__ )
        snake_case = kwargs.pop('''text''' , A__ )
        if len(A__ ) > 0:
            snake_case = args[0]
            snake_case = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
        if text is not None:
            snake_case = self.tokenizer(A__ , **A__ )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            snake_case = encodings['''input_ids''']
            return inputs

    def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*A__ , **A__ )

    def UpperCamelCase ( self , *A__ , **A__ ) -> str:
        return self.tokenizer.decode(*A__ , **A__ )

    def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]:
        return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
44
0
'''simple docstring''' import pickle import numpy as np from matplotlib import pyplot as plt class _lowercase : def __init__( self , A__ , A__ , A__ , A__ , A__ , A__=0.2 , A__=0.2 ) -> int: snake_case = bp_numa snake_case = bp_numa snake_case = bp_numa snake_case = conva_get[:2] snake_case = conva_get[2] snake_case = size_pa snake_case = rate_w snake_case = rate_t snake_case = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] snake_case = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) snake_case = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) snake_case = -2 * np.random.rand(self.conva[1] ) + 1 snake_case = -2 * np.random.rand(self.num_bpa ) + 1 snake_case = -2 * np.random.rand(self.num_bpa ) + 1 def UpperCamelCase ( self , A__ ) -> Tuple: # save model dict with pickle snake_case = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(A__ , '''wb''' ) as f: pickle.dump(A__ , A__ ) print(F"""Model saved: {save_path}""" ) @classmethod def UpperCamelCase ( cls , A__ ) -> Optional[Any]: # read saved model with open(A__ , '''rb''' ) as f: snake_case = pickle.load(A__ ) # noqa: S301 snake_case = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) snake_case = model_dic.get('''size_pooling1''' ) snake_case = model_dic.get('''num_bp1''' ) snake_case = model_dic.get('''num_bp2''' ) snake_case = model_dic.get('''num_bp3''' ) snake_case = model_dic.get('''rate_weight''' ) snake_case = model_dic.get('''rate_thre''' ) # create model instance snake_case = CNN(A__ , A__ , A__ , A__ , A__ , A__ , A__ ) # modify model parameter snake_case = model_dic.get('''w_conv1''' ) snake_case = model_dic.get('''wkj''' ) snake_case = model_dic.get('''vji''' ) snake_case = model_dic.get('''thre_conv1''' ) snake_case = model_dic.get('''thre_bp2''' ) snake_case = model_dic.get('''thre_bp3''' ) return conv_ins def UpperCamelCase ( self , A__ ) -> str: return 1 / (1 + np.exp(-1 * x )) def UpperCamelCase ( self , A__ ) -> Any: return round(A__ , 3 ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str: # convolution process snake_case = convs[0] snake_case = convs[1] snake_case = np.shape(A__ )[0] # get the data slice of original image data, data_focus snake_case = [] for i_focus in range(0 , size_data - size_conv + 1 , A__ ): for j_focus in range(0 , size_data - size_conv + 1 , A__ ): snake_case = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A__ ) # calculate the feature map of every single kernel, and saved as list of matrix snake_case = [] snake_case = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A__ ): snake_case = [] for i_focus in range(len(A__ ) ): snake_case = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A__ ) ) snake_case = np.asmatrix(A__ ).reshape( A__ , A__ ) data_featuremap.append(A__ ) # expanding the data slice to One dimenssion snake_case = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A__ ) ) snake_case = np.asarray(A__ ) return focus_list, data_featuremap 
def UpperCamelCase ( self , A__ , A__ , A__="average_pool" ) -> int: # pooling process snake_case = len(featuremaps[0] ) snake_case = int(size_map / size_pooling ) snake_case = [] for i_map in range(len(A__ ) ): snake_case = featuremaps[i_map] snake_case = [] for i_focus in range(0 , A__ , A__ ): for j_focus in range(0 , A__ , A__ ): snake_case = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A__ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A__ ) ) snake_case = np.asmatrix(A__ ).reshape(A__ , A__ ) featuremap_pooled.append(A__ ) return featuremap_pooled def UpperCamelCase ( self , A__ ) -> Tuple: # expanding three dimension data to one dimension list snake_case = [] for i in range(len(A__ ) ): snake_case = np.shape(data[i] ) snake_case = data[i].reshape(1 , shapes[0] * shapes[1] ) snake_case = data_listed.getA().tolist()[0] data_expanded.extend(A__ ) snake_case = np.asarray(A__ ) return data_expanded def UpperCamelCase ( self , A__ ) -> List[str]: # expanding matrix to one dimension list snake_case = np.asarray(A__ ) snake_case = np.shape(A__ ) snake_case = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]: snake_case = [] snake_case = 0 for i_map in range(A__ ): snake_case = np.ones((size_map, size_map) ) for i in range(0 , A__ , A__ ): for j in range(0 , A__ , A__ ): snake_case = pd_pool[ i_pool ] snake_case = i_pool + 1 snake_case = np.multiply( A__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A__ ) return pd_all def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__=bool ) -> List[str]: # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(A__ )) ) print((''' - - Shape: Teach_Data ''', np.shape(A__ )) ) snake_case = 0 snake_case = [] snake_case = 1_00_00 while rp < n_repeat and mse >= error_accuracy: snake_case = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A__ ) ): # print('------------Learning Image: %d--------------'%p) snake_case = np.asmatrix(datas_train[p] ) snake_case = np.asarray(datas_teach[p] ) snake_case , snake_case = self.convolute( A__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) snake_case = self.pooling(A__ , self.size_poolinga ) snake_case = np.shape(A__ ) snake_case = self._expand(A__ ) snake_case = data_bp_input snake_case = np.dot(A__ , self.vji.T ) - self.thre_bpa snake_case = self.sig(A__ ) snake_case = np.dot(A__ , self.wkj.T ) - self.thre_bpa snake_case = self.sig(A__ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- snake_case = np.multiply( (data_teach - bp_outa) , np.multiply(A__ , (1 - bp_outa) ) ) snake_case = np.multiply( np.dot(A__ , self.wkj ) , np.multiply(A__ , (1 - bp_outa) ) ) snake_case = np.dot(A__ , self.vji ) snake_case = pd_i_all / (self.size_poolinga * self.size_poolinga) snake_case = pd_conva_pooled.T.getA().tolist() snake_case = self._calculate_gradient_from_pool( A__ , A__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): snake_case = self._expand_mat(pd_conva_all[k_conv] ) snake_case = self.rate_weight * np.dot(A__ , A__ ) snake_case = 
self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) snake_case = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer snake_case = self.wkj + pd_k_all.T * bp_outa * self.rate_weight snake_case = self.vji + pd_j_all.T * bp_outa * self.rate_weight snake_case = self.thre_bpa - pd_k_all * self.rate_thre snake_case = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image snake_case = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) snake_case = rp + 1 snake_case = error_count / patterns all_mse.append(A__ ) def draw_error(): snake_case = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A__ , '''+-''' ) plt.plot(A__ , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(A__ , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def UpperCamelCase ( self , A__ ) -> List[str]: # model predict snake_case = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(A__ )) ) for p in range(len(A__ ) ): snake_case = np.asmatrix(datas_test[p] ) snake_case , snake_case = self.convolute( A__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) snake_case = self.pooling(A__ , self.size_poolinga ) snake_case = self._expand(A__ ) snake_case = data_bp_input snake_case = bp_outa * self.vji.T - self.thre_bpa snake_case = self.sig(A__ ) snake_case = bp_outa * self.wkj.T - self.thre_bpa snake_case = self.sig(A__ ) produce_out.extend(bp_outa.getA().tolist() ) snake_case = [list(map(self.do_round , A__ ) ) for each in produce_out] return np.asarray(A__ ) def UpperCamelCase ( self , A__ ) -> Optional[Any]: # return the data of image after convoluting process so we can check it out snake_case = np.asmatrix(A__ ) snake_case , snake_case = self.convolute( A__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) snake_case = self.pooling(A__ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
713
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _lowercase ( __a ): _UpperCAmelCase = '''char''' _UpperCAmelCase = '''bpe''' _UpperCAmelCase = '''wp''' _lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _lowercase ( __a ): _UpperCAmelCase = ['''image_processor''', '''char_tokenizer'''] _UpperCAmelCase = '''ViTImageProcessor''' _UpperCAmelCase = '''MgpstrTokenizer''' def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]: snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A__ , ) snake_case = kwargs.pop('''feature_extractor''' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) snake_case = tokenizer snake_case = AutoTokenizer.from_pretrained('''gpt2''' ) snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(A__ , A__ ) def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None: snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ ) if text is None: return inputs elif images is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , A__ ) -> Dict: snake_case , snake_case , snake_case = sequences snake_case = char_preds.size(0 ) snake_case , snake_case = self._decode_helper(A__ , '''char''' ) snake_case , snake_case = self._decode_helper(A__ , '''bpe''' ) snake_case , snake_case = self._decode_helper(A__ , '''wp''' ) snake_case = [] snake_case = [] for i in range(A__ ): snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] snake_case = scores.index(max(A__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) snake_case = {} snake_case = final_strs snake_case = final_scores snake_case = char_strs snake_case = bpe_strs snake_case = wp_strs return out def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]: if format == DecodeType.CHARACTER: snake_case = self.char_decode snake_case = 1 snake_case = '''[s]''' elif format == DecodeType.BPE: snake_case = self.bpe_decode snake_case = 2 snake_case = '''#''' elif format == DecodeType.WORDPIECE: snake_case = self.wp_decode snake_case = 1_02 snake_case = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) snake_case , snake_case = [], [] snake_case = pred_logits.size(0 ) snake_case = pred_logits.size(1 ) snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ ) snake_case = preds_index.view(-1 , A__ )[:, 1:] snake_case = decoder(A__ ) snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 ) snake_case = preds_max_prob[:, 1:] for index in range(A__ ): snake_case = preds_str[index].find(A__ ) snake_case = preds_str[index][:pred_eos] 
snake_case = preds_index[index].cpu().tolist() snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1 snake_case = preds_max_prob[index][: pred_eos_index + 1] snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A__ ) conf_scores.append(A__ ) return dec_strs, conf_scores def UpperCamelCase ( self , A__ ) -> int: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )] return decode_strs def UpperCamelCase ( self , A__ ) -> List[str]: return self.bpe_tokenizer.batch_decode(A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )] return decode_strs
44
0
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer _lowercase = logging.getLogger(__name__) def __UpperCamelCase ( ) ->Dict: snake_case = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=a , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=a , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=a , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=a , default=1000 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=a , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=a , type=a , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=a , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=a , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) snake_case = parser.parse_args() return args def __UpperCamelCase ( a : List[Any] ) ->Tuple: def fn(a : Optional[int] ): return tokenizer(examples['''text'''] ) return fn def __UpperCamelCase ( a : Any ) ->int: snake_case = [] for i in range(len(tokenized_data['''input_ids'''] ) ): snake_case = { '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } snake_case = tf.train.Features(feature=a ) snake_case = tf.train.Example(features=a ) snake_case = example.SerializeToString() records.append(a ) return records def __UpperCamelCase ( a : Tuple ) ->List[str]: snake_case = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: snake_case = min(len(a ) , args.limit ) snake_case = dataset.select(range(a ) ) print(f"""Limiting the dataset to {args.limit} entries.""" ) snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) snake_case = os.path.join(args.output_dir , args.split ) if not os.path.exists(a ): os.makedirs(a ) else: snake_case = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. snake_case = tokenize_function(a ) snake_case = dataset.map(a , batched=a , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. 
To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(a : List[str] ): # Concatenate all texts. snake_case = {k: sum(examples[k] , [] ) for k in examples.keys()} snake_case = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 snake_case = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. snake_case = { k: [t[i : i + args.max_length] for i in range(0 , a , args.max_length )] for k, t in concatenated_examples.items() } return result snake_case = dataset_tokenized.map(a , batched=a , batch_size=1000 , num_proc=4 ) snake_case = 0 snake_case = 0 for shard in range(0 , len(a ) , args.shard_size ): snake_case = grouped_dataset[shard : shard + args.shard_size] snake_case = len(dataset_snapshot['''input_ids'''] ) snake_case = os.path.join(a , f"""dataset-{shard_count}-{records_containing}.tfrecord""" ) snake_case = get_serialized_examples(a ) with tf.io.TFRecordWriter(a ) as out_file: for i in range(len(a ) ): snake_case = serialized_examples[i] out_file.write(a ) print('''Wrote file {} containing {} records'''.format(a , a ) ) shard_count += 1 total_records += records_containing with open(f"""split-{args.split}-records-count.txt""" , '''w''' ) as f: print(f"""Total {args.split} records: {total_records}""" , file=a ) if __name__ == "__main__": _lowercase = parse_args() main(args)
714
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowercase , _lowercase , _lowercase = False, False, False @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = "dict" _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a ) def __call__( self ) -> Optional[Any]: return self.pa_type def UpperCamelCase ( self , A__ ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(A__ , A__ ): return {"bytes": None, "path": value} elif isinstance(A__ , A__ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes snake_case = BytesIO() sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 snake_case = BytesIO(bytes() ) sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def UpperCamelCase ( self , A__ , A__ = None ) -> dict: if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: snake_case = token_per_repo_id or {} snake_case = path.split('''::''' )[-1] try: snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id'''] snake_case = token_per_repo_id[repo_id] except (ValueError, KeyError): snake_case = None with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f: snake_case , snake_case = sf.read(A__ ) else: snake_case , snake_case = sf.read(A__ ) snake_case = array.T if self.mono: snake_case = librosa.to_mono(A__ ) if self.sampling_rate and self.sampling_rate != sampling_rate: snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate ) snake_case = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def UpperCamelCase ( self , A__ ) -> pa.StructArray: if pa.types.is_string(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: snake_case = storage.field('''bytes''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: snake_case = storage.field('''path''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(A__ , self.pa_type ) def UpperCamelCase ( self , A__ ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A__ ): with xopen(A__ , '''rb''' ) as f: snake_case = f.read() return bytes_ snake_case = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else 
x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) snake_case = pa.array( [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(A__ , self.pa_type )
44
0
'''simple docstring'''
def __UpperCamelCase ( a : list[list] ) ->list[list]:
    snake_case = current_set.copy()
    for row_index, row in enumerate(a ):
        snake_case = row[0]
        for column_index, column in enumerate(a ):
            if magnitude == 0:
                snake_case = column
                continue
            snake_case = column / magnitude
    # Subtract to cancel term
    snake_case = current_set[0]
    snake_case = [first_row]
    snake_case = current_set[1::]
    for row in current_set:
        snake_case = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(a )
            continue
        for column_index in range(len(a ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(a )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        snake_case = final_set[0]
        snake_case = []
        snake_case = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        snake_case = simplify(a )
        for i in range(len(a ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , a )
        snake_case = resultant
    return final_set


def __UpperCamelCase ( a : list[list] ) ->list:
    if len(a ) == 0:
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    snake_case = len(a ) + 1
    if any(len(a ) != _length for item in equations ):
        raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
    for row in equations:
        if any(not isinstance(a , (int, float) ) for column in row ):
            raise ValueError('''solve_simultaneous() requires lists of integers''' )
    if len(a ) == 1:
        return [equations[0][-1] / equations[0][0]]
    snake_case = equations.copy()
    if any(0 in row for row in data_set ):
        snake_case = data_set.copy()
        snake_case = []
        for row_index, row in enumerate(a ):
            if 0 not in row:
                snake_case = data_set.pop(a )
                break
        if not full_row:
            raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
        data_set.insert(0 , a )
    snake_case = data_set.copy()
    snake_case = simplify(a )
    snake_case = simplified[::-1]
    snake_case = []
    for row in simplified:
        snake_case = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        snake_case = row.copy()[: len(a ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(a ) == 0:
            solutions.append(0 )
            continue
        snake_case = temp_row[1::]
        snake_case = temp_row[::-1]
        for column_index, column in enumerate(a ):
            current_solution -= column * solutions[column_index]
        solutions.append(a )
    snake_case = []
    for item in solutions:
        final.append(float(round(a , 5 ) ) )
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    _lowercase = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
715
'''simple docstring'''
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class _lowercase :
        @staticmethod
        def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
            pass


def __UpperCamelCase ( a : Image ) ->str:
    snake_case = hashlib.mda(image.tobytes() )
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
    _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
        snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ )
        import datasets

        snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        snake_case = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] ,
            A__ ,
        )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def UpperCamelCase ( self ) -> Optional[Any]:
        pass

    @slow
    @require_torch
    def UpperCamelCase ( self ) -> Dict:
        snake_case = '''Intel/dpt-large'''
        snake_case = pipeline('''depth-estimation''' , model=A__ )
        snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        snake_case = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )

    @require_torch
    def UpperCamelCase ( self ) -> Any:
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
44
0
'''simple docstring'''
class _lowercase :
    def __init__( self , A__ , A__ , A__ ) -> int:
        snake_case = name
        snake_case = value
        snake_case = weight

    def __repr__( self ) -> str:
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def UpperCamelCase ( self ) -> Any:
        return self.value

    def UpperCamelCase ( self ) -> List[str]:
        return self.name

    def UpperCamelCase ( self ) -> Optional[Any]:
        return self.weight

    def UpperCamelCase ( self ) -> Optional[int]:
        return self.value / self.weight


def __UpperCamelCase ( a : int , a : Optional[int] , a : int ) ->Dict:
    snake_case = []
    for i in range(len(a ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def __UpperCamelCase ( a : List[str] , a : Optional[int] , a : Dict ) ->Any:
    snake_case = sorted(a , key=a , reverse=a )
    snake_case = []
    snake_case , snake_case = 0.0, 0.0
    for i in range(len(a ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def __UpperCamelCase ( ) ->Union[str, Any]:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
716
'''simple docstring'''
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def __UpperCamelCase ( a : Optional[int] ) ->Dict:
    snake_case = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(a , a )


def __UpperCamelCase ( a : Optional[Any] ) ->int:
    snake_case = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            snake_case = s_dict.pop(a )
        elif "subsample" in key:
            snake_case = s_dict.pop(a )


def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]:
    snake_case , snake_case = emb.weight.shape
    snake_case = nn.Linear(a , a , bias=a )
    snake_case = emb.weight.data
    return lin_layer


def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple:
    snake_case = torch.load(a , map_location='''cpu''' )
    snake_case = mam_aaa['''args''']
    snake_case = mam_aaa['''model''']
    snake_case = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(a )
    rename_keys(a )
    snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    snake_case = args.share_decoder_input_output_embed
    snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )]
    snake_case = SpeechaTextConfig(
        vocab_size=a ,
        max_source_positions=args.max_source_positions ,
        max_target_positions=args.max_target_positions ,
        encoder_layers=args.encoder_layers ,
        decoder_layers=args.decoder_layers ,
        encoder_attention_heads=args.encoder_attention_heads ,
        decoder_attention_heads=args.decoder_attention_heads ,
        encoder_ffn_dim=args.encoder_ffn_embed_dim ,
        decoder_ffn_dim=args.decoder_ffn_embed_dim ,
        d_model=args.encoder_embed_dim ,
        dropout=args.dropout ,
        attention_dropout=args.attention_dropout ,
        activation_dropout=args.activation_dropout ,
        activation_function='''relu''' ,
        num_conv_layers=len(a ) ,
        conv_channels=args.conv_channels ,
        conv_kernel_sizes=a ,
        input_feat_per_channel=args.input_feat_per_channel ,
        input_channels=args.input_channels ,
        tie_word_embeddings=a ,
        num_beams=5 ,
        max_length=200 ,
        use_cache=a ,
        decoder_start_token_id=2 ,
        early_stopping=a ,
    )
    snake_case = SpeechaTextForConditionalGeneration(a )
    snake_case , snake_case = model.model.load_state_dict(a , strict=a )
    if len(a ) > 0 and not set(a ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        snake_case = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case = lm_head_weights
    model.save_pretrained(a )


if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
44
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging

_lowercase = logging.get_logger(__name__)

_lowercase = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class _lowercase ( __a ):
    _UpperCAmelCase = '''glpn'''

    def __init__(
        self ,
        A__=3 ,
        A__=4 ,
        A__=[2, 2, 2, 2] ,
        A__=[8, 4, 2, 1] ,
        A__=[32, 64, 1_60, 2_56] ,
        A__=[7, 3, 3, 3] ,
        A__=[4, 2, 2, 2] ,
        A__=[1, 2, 5, 8] ,
        A__=[4, 4, 4, 4] ,
        A__="gelu" ,
        A__=0.0 ,
        A__=0.0 ,
        A__=0.0_2 ,
        A__=0.1 ,
        A__=1e-6 ,
        A__=64 ,
        A__=10 ,
        A__=-1 ,
        **A__ ,
    ) -> Union[str, Any]:
        super().__init__(**A__ )
        snake_case = num_channels
        snake_case = num_encoder_blocks
        snake_case = depths
        snake_case = sr_ratios
        snake_case = hidden_sizes
        snake_case = patch_sizes
        snake_case = strides
        snake_case = mlp_ratios
        snake_case = num_attention_heads
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = initializer_range
        snake_case = drop_path_rate
        snake_case = layer_norm_eps
        snake_case = decoder_hidden_size
        snake_case = max_depth
        snake_case = head_in_index
717
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class _lowercase ( metaclass=__a ):
    _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq''']

    def __init__( self , *A__ , **A__ ) -> Union[str, Any]:
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def UpperCamelCase ( cls , *A__ , **A__ ) -> Optional[Any]:
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def UpperCamelCase ( cls , *A__ , **A__ ) -> Any:
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
44
0
'''simple docstring'''
_lowercase = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
718
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterator


class _lowercase :
    def __init__( self , A__ ) -> None:
        snake_case = value
        snake_case = None
        snake_case = None


class _lowercase :
    def __init__( self , A__ ) -> None:
        snake_case = tree

    def UpperCamelCase ( self , A__ ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
44
0
'''simple docstring'''
from __future__ import annotations

from scipy.special import comb  # type: ignore


class _lowercase :
    def __init__( self , A__ ) -> List[Any]:
        snake_case = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        snake_case = len(A__ ) - 1

    def UpperCamelCase ( self , A__ ) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        snake_case = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , A__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(A__ ) , 5 ) == 1
        return output_values

    def UpperCamelCase ( self , A__ ) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        snake_case = self.basis_function(A__ )
        snake_case = 0.0
        snake_case = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def UpperCamelCase ( self , A__ = 0.0_1 ) -> str:
        from matplotlib import pyplot as plt  # type: ignore

        snake_case = []  # x coordinates of points to plot
        snake_case = []  # y coordinates of points to plot
        snake_case = 0.0
        while t <= 1:
            snake_case = self.bezier_curve_function(A__ )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        snake_case = [i[0] for i in self.list_of_points]
        snake_case = [i[1] for i in self.list_of_points]
        plt.plot(
            A__ ,
            A__ ,
            color='''blue''' ,
            label='''Curve of Degree ''' + str(self.degree ) ,
        )
        plt.scatter(A__ , A__ , color='''red''' , label='''Control Points''' )
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
719
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _lowercase = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __UpperCamelCase ( a : List[str] ) ->Optional[int]: snake_case = torch.load(a , map_location='''cpu''' ) return sd def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] , a : int=rename_keys_prefix ) ->Tuple: snake_case = OrderedDict() snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue snake_case = key for name_pair in rename_keys_prefix: snake_case = new_key.replace(name_pair[0] , name_pair[1] ) snake_case = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately snake_case = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: snake_case = '''pretraining''' if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "nlvr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} snake_case = '''multichoice''' elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} snake_case = '''vqa_advanced''' elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129} snake_case = '''vqa''' elif "nlvr" in checkpoint_path: snake_case = { '''visual_embedding_dim''': 1024, '''num_labels''': 2, } snake_case = '''nlvr''' snake_case = VisualBertConfig(**a ) # Load State Dict snake_case = load_state_dict(a ) snake_case = get_new_dict(a , a ) if model_type == "pretraining": snake_case = VisualBertForPreTraining(a ) elif model_type == "vqa": snake_case = VisualBertForQuestionAnswering(a ) elif model_type == "nlvr": snake_case = VisualBertForVisualReasoning(a ) elif model_type == "multichoice": snake_case = VisualBertForMultipleChoice(a ) model.load_state_dict(a ) # Save Checkpoints Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') 
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
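# A toy reproduction of the key-renaming loop in `get_new_dict` above: each
# state-dict key is threaded through an ordered list of (old, new) substring
# pairs. The state dict and rename pairs here are invented for illustration.
from collections import OrderedDict as _OrderedDict

_demo_pairs = [('bert.bert', 'visual_bert'), ('projection', 'visual_projection')]
_demo_sd = _OrderedDict([('bert.bert.encoder.layer.0.projection.weight', 1.0)])

_renamed = _OrderedDict()
for _key, _value in _demo_sd.items():
    _new_key = _key
    for _old, _new in _demo_pairs:
        _new_key = _new_key.replace(_old, _new)
    _renamed[_new_key] = _value

assert list(_renamed) == ['visual_bert.encoder.layer.0.visual_projection.weight']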
44
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class _lowercase : def __init__( self , A__=2 , A__=3 , A__=64 , A__=None ) -> Dict: snake_case = np.random.default_rng(A__ ) snake_case = length snake_case = rng.normal(size=(length,) ).astype(np.floataa ) snake_case = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ) -> str: return self.length def __getitem__( self , A__ ) -> Optional[Any]: return {"x": self.x[i], "y": self.y[i]} class _lowercase ( torch.nn.Module ): def __init__( self , A__=0 , A__=0 , A__=False ) -> str: super().__init__() snake_case = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) snake_case = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) snake_case = True def UpperCamelCase ( self , A__=None ) -> Tuple: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) snake_case = False return x * self.a[0] + self.b[0] class _lowercase ( torch.nn.Module ): def __init__( self , A__=0 , A__=0 , A__=False ) -> Optional[int]: super().__init__() snake_case = torch.nn.Parameter(torch.tensor(A__ ).float() ) snake_case = torch.nn.Parameter(torch.tensor(A__ ).float() ) snake_case = True def UpperCamelCase ( self , A__=None ) -> List[str]: if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) snake_case = False return x * self.a + self.b def __UpperCamelCase ( a : Tuple , a : int = 16 ) ->Dict: from datasets import load_dataset from transformers import AutoTokenizer snake_case = AutoTokenizer.from_pretrained('''bert-base-cased''' ) snake_case = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''} snake_case = load_dataset('''csv''' , data_files=a ) snake_case = datasets['''train'''].unique('''label''' ) snake_case = {v: i for i, v in enumerate(a )} def tokenize_function(a : List[str] ): # max_length=None => use the model max length (it's actually the default) snake_case = tokenizer( examples['''sentence1'''] , examples['''sentence2'''] , truncation=a , max_length=a , padding='''max_length''' ) if "label" in examples: snake_case = [label_to_id[l] for l in examples['''label''']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case = datasets.map( a , batched=a , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , ) def collate_fn(a : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(a , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. snake_case = DataLoader(tokenized_datasets['''train'''] , shuffle=a , collate_fn=a , batch_size=2 ) snake_case = DataLoader(tokenized_datasets['''validation'''] , shuffle=a , collate_fn=a , batch_size=1 ) return train_dataloader, eval_dataloader
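# A framework-free sketch of the two padding strategies picked in `collate_fn`
# above: pad to a fixed length (the TPU-friendly 'max_length' choice) or only
# to the longest sequence in the batch ('longest'). Token ids and the pad id
# are invented for the example.
def _pad_batch(sequences, pad_id=0, max_length=None):
    # max_length=None mimics padding='longest'; a fixed value mimics 'max_length'.
    target = max_length if max_length is not None else max(len(s) for s in sequences)
    return [s + [pad_id] * (target - len(s)) for s in sequences]


assert _pad_batch([[5, 6], [7]]) == [[5, 6], [7, 0]]
assert _pad_batch([[5, 6], [7]], max_length=4) == [[5, 6, 0, 0], [7, 0, 0, 0]]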
720
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]: snake_case = original_name.split('''.''' )[0] snake_case = key.split('''.''' ) snake_case = int(key_list[key_list.index(a ) - 2] ) snake_case = int(key_list[key_list.index(a ) - 1] ) snake_case = orig_block_num - offset snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def __UpperCamelCase ( a : Tuple ) ->Dict: snake_case = OrderedDict() snake_case , snake_case = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): snake_case = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 snake_case = key[: key.find('''proj''' )] snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" ) snake_case = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: snake_case = '''poolformer.encoder.''' + key if "mlp.fc1" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' ) if "norm2" in key: snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: snake_case = key.replace('''head''' , '''classifier''' ) snake_case = value return new_state_dict def __UpperCamelCase ( ) ->Optional[int]: snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case = Image.open(requests.get(a , stream=a ).raw ) return image @torch.no_grad() def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]: snake_case = PoolFormerConfig() # set attributes based on model_name snake_case = '''huggingface/label-files''' snake_case = model_name[-3:] snake_case = 1000 snake_case = '''imagenet-1k-id2label.json''' snake_case = (1, 1000) # set config attributes snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} if size == "s12": snake_case = [2, 2, 6, 2] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s24": snake_case = [4, 4, 12, 4] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s36": snake_case = [6, 6, 18, 6] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.9 elif size == "m36": snake_case = [6, 6, 18, 6] snake_case = [96, 192, 384, 768] snake_case = 4.0 
snake_case = 1e-6 snake_case = 0.95 elif size == "m48": snake_case = [8, 8, 24, 8] snake_case = [96, 192, 384, 768] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.95 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) # Prepare image snake_case = prepare_img() snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict snake_case = torch.load(a , map_location=torch.device('''cpu''' ) ) # rename keys snake_case = rename_keys(a ) # create HuggingFace model and load state dict snake_case = PoolFormerForImageClassification(a ) model.load_state_dict(a ) model.eval() # Define image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass snake_case = model(a ) snake_case = outputs.logits # define expected logit slices for different models if size == "s12": snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": snake_case = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": snake_case = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": snake_case = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a , atol=1e-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) _lowercase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
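# A standalone version of the offset arithmetic in `replace_key_with_offset`
# above: find the block index two dots before the layer name, subtract the
# offset, and rewrite the key. The key format is simplified for illustration.
def _shift_block_index(key: str, original_name: str, new_name: str, offset: int) -> str:
    parts = key.split('.')
    anchor = parts.index(original_name.split('.')[0])
    block, layer = int(parts[anchor - 2]), int(parts[anchor - 1])
    return key.replace(f'{block}.{layer}.{original_name}', f'{block - offset}.{layer}.{new_name}')


assert _shift_block_index('encoder.2.0.mlp.fc1.weight', 'mlp.fc1', 'output.conv1', 1) == 'encoder.1.0.output.conv1.weight'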
44
0
import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='subcommands', dest='subcommand')

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, 'func'):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
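# A self-contained sketch of the parent-parser pattern used above: shared
# flags live on a no-help parent parser, and each subcommand inherits them via
# `parents=[...]`. The program, subcommand, and flag names are invented.
import argparse as _argparse

_parent = _argparse.ArgumentParser(add_help=False)
_parent.add_argument('--verbose', action='store_true')

_cli = _argparse.ArgumentParser(prog='demo')
_subs = _cli.add_subparsers(title='subcommands', dest='subcommand')
_subs.add_parser('default', parents=[_parent])
_subs.add_parser('update', parents=[_parent])

_ns = _cli.parse_args(['update', '--verbose'])
assert _ns.subcommand == 'update' and _ns.verbose is True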
721
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowercase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowercase = logging.getLogger() def __UpperCamelCase ( ) ->Tuple: snake_case = argparse.ArgumentParser() parser.add_argument('''-f''' ) snake_case = parser.parse_args() return args.f def __UpperCamelCase ( a : Dict , a : Tuple="eval" ) ->List[Any]: snake_case = os.path.join(a , f"""{split}_results.json""" ) if os.path.exists(a ): with open(a , '''r''' ) as f: return json.load(a ) raise ValueError(f"""can't find {path}""" ) _lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _lowercase ( __a ): def UpperCamelCase ( self ) -> List[str]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_glue.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) @slow def UpperCamelCase ( self ) -> List[Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_clm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 1_00 ) @slow def UpperCamelCase ( self ) -> int: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A__ , '''argv''' , A__ ): run_summarization_flax.main() snake_case = get_results(A__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt 
--validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_mlm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def UpperCamelCase ( self ) -> Dict: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_ta_mlm_flax.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 ) @slow def UpperCamelCase ( self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case = 7 if get_gpu_count() > 1 else 2 snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_ner.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def UpperCamelCase ( self ) -> Any: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_qa.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
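# The tests above drive each example script by patching `sys.argv` and then
# calling the script's `main()`. The same trick in isolation, with a stand-in
# function instead of a real training script:
import sys as _sys
from unittest.mock import patch as _patch


def _demo_main():
    return _sys.argv[1:]


with _patch.object(_sys, 'argv', ['prog', '--output_dir', '/tmp/out']):
    assert _demo_main() == ['--output_dir', '/tmp/out']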
44
0
'''simple docstring'''


def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f'{solution() = }')
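# A standard-library cross-check of `solution` above: folding lcm over 1..n
# with `math.gcd` and `functools.reduce` must agree with the recursive
# version. 232792560 is the well-known lcm of 1..20.
from functools import reduce as _reduce
from math import gcd as _gcd


def _lcm_range(n: int) -> int:
    return _reduce(lambda acc, i: acc * i // _gcd(acc, i), range(1, n + 1), 1)


assert _lcm_range(20) == 232792560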
700
'''simple docstring''' from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS _lowercase = logging.get_logger(__name__) _lowercase = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class _lowercase ( __a ): def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]: super().__init__(*A__ , **A__ ) if config is None: assert isinstance(self.model , A__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) snake_case = self.model.config else: snake_case = config snake_case = data_args snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss snake_case = label_smoothed_nll_loss def UpperCamelCase ( self , A__ ) -> Tuple: if self.optimizer is None: snake_case = ['''bias''', '''LayerNorm.weight'''] snake_case = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] snake_case = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: snake_case = Adafactor snake_case = {'''scale_parameter''': False, '''relative_step''': False} else: snake_case = AdamW snake_case = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } snake_case = self.args.learning_rate if self.sharded_ddp: snake_case = OSS( params=A__ , optim=A__ , **A__ , ) else: snake_case = optimizer_cls(A__ , **A__ ) if self.lr_scheduler is None: snake_case = self._get_lr_scheduler(A__ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": snake_case = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: snake_case = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ ) return scheduler def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token snake_case = model(**A__ , use_cache=A__ )[0] snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2] else: # compute label smoothed loss snake_case = model(**A__ , use_cache=A__ )[0] snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 ) snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def UpperCamelCase ( self , A__ , A__ ) -> Any: snake_case = inputs.pop('''labels''' ) snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) return loss def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: snake_case = self._prepare_inputs(A__ ) snake_case = { '''max_length''': self.data_args.val_max_target_length if 
self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: snake_case = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) snake_case = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) snake_case = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) snake_case = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def UpperCamelCase ( self , A__ , A__ ) -> List[str]: # If PAD token is not defined at least EOS token has to be defined snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F""" padded to `max_length`={max_length}""" ) snake_case = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) snake_case = tensor return padded_tensor
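# A standalone sketch of the `_pad_tensors_to_max_len` idea above: allocate a
# (batch, max_length) tensor filled with the pad id and copy the shorter
# sequences into its left side. Requires torch; the pad id is arbitrary.
import torch as _torch


def _pad_to_max_len(tensor, max_length, pad_token_id):
    padded = pad_token_id * _torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
    padded[:, : tensor.shape[-1]] = tensor
    return padded


_demo = _pad_to_max_len(_torch.tensor([[1, 2], [3, 4]]), max_length=4, pad_token_id=0)
assert _demo.tolist() == [[1, 2, 0, 0], [3, 4, 0, 0]]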
44
0
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __UpperCamelCase ( a : Optional[int] ) ->Dict: snake_case = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Optional[Any] ) ->int: snake_case = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case = s_dict.pop(a ) elif "subsample" in key: snake_case = s_dict.pop(a ) def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]: snake_case , snake_case = emb.weight.shape snake_case = nn.Linear(a , a , bias=a ) snake_case = emb.weight.data return lin_layer def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple: snake_case = torch.load(a , map_location='''cpu''' ) snake_case = mam_aaa['''args'''] snake_case = mam_aaa['''model'''] snake_case = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(a ) rename_keys(a ) snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0] snake_case = args.share_decoder_input_output_embed snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )] snake_case = SpeechaTextConfig( vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , ) snake_case = SpeechaTextForConditionalGeneration(a ) snake_case , snake_case = model.model.load_state_dict(a , strict=a ) if len(a ) > 0 and not set(a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f""" but all the following weights are missing {missing}""" ) if tie_embeds: snake_case = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case = lm_head_weights model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
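# The `make_linear_from_emb` helper above ties the decoder's output projection
# to the input embedding matrix. A minimal demonstration of that weight tying
# with torch (vocabulary and hidden sizes are arbitrary):
import torch as _torch
from torch import nn as _nn

_emb = _nn.Embedding(10, 4)
_vocab_size, _emb_size = _emb.weight.shape
_lm_head = _nn.Linear(_emb_size, _vocab_size, bias=False)
_lm_head.weight.data = _emb.weight.data  # share the same storage

assert _torch.equal(_lm_head.weight, _emb.weight)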
701
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r'#.*', '', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '\n'.join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8')
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
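# Two quick property checks of `_hash_python_lines` above: full-line comments
# and blank lines do not change the hash, while real code changes do.
assert _hash_python_lines(['# header comment', '', 'x = 1']) == _hash_python_lines(['x = 1'])
assert _hash_python_lines(['x = 1']) != _hash_python_lines(['x = 2'])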
44
0
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _lowercase ( __a ): _UpperCAmelCase = ['''image_processor''', '''tokenizer'''] _UpperCAmelCase = '''AutoImageProcessor''' _UpperCAmelCase = '''AutoTokenizer''' def __init__( self , A__=None , A__=None , **A__ ): snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A__ , ) snake_case = kwargs.pop('''feature_extractor''' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(A__ , A__ ) snake_case = self.image_processor snake_case = False def __call__( self , *A__ , **A__ ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A__ , **A__ ) snake_case = kwargs.pop('''images''' , A__ ) snake_case = kwargs.pop('''text''' , A__ ) if len(A__ ) > 0: snake_case = args[0] snake_case = args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: snake_case = self.image_processor(A__ , *A__ , **A__ ) if text is not None: snake_case = self.tokenizer(A__ , **A__ ) if text is None: return inputs elif images is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , *A__ , **A__ ): return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ): return self.tokenizer.decode(*A__ , **A__ ) @contextmanager def UpperCamelCase ( self ): warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) snake_case = True snake_case = self.tokenizer yield snake_case = self.image_processor snake_case = False def UpperCamelCase ( self , A__ , A__=False , A__=None ): if added_vocab is None: snake_case = self.tokenizer.get_added_vocab() snake_case = {} while tokens: snake_case = re.search(R'''<s_(.*?)>''' , A__ , re.IGNORECASE ) if start_token is None: break snake_case = start_token.group(1 ) snake_case = re.search(RF"""</s_{key}>""" , A__ , re.IGNORECASE ) snake_case = start_token.group() if end_token is None: snake_case = tokens.replace(A__ , '''''' ) else: snake_case = end_token.group() snake_case = re.escape(A__ ) snake_case = re.escape(A__ ) snake_case = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , A__ , re.IGNORECASE ) if content is not None: snake_case = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node snake_case = self.tokenajson(A__ , is_inner_value=A__ , added_vocab=A__ ) if value: if len(A__ ) == 1: snake_case = value[0] snake_case = value else: # leaf nodes snake_case = [] for leaf in content.split(R'''<sep/>''' ): snake_case = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": snake_case = leaf[1:-2] # for categorical special tokens output[key].append(A__ ) if len(output[key] ) == 1: snake_case = output[key][0] snake_case = tokens[tokens.find(A__ ) + len(A__ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=A__ , added_vocab=A__ ) if len(A__ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def UpperCamelCase ( self ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A__ , ) return self.image_processor_class @property def UpperCamelCase ( self ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A__ , ) return self.image_processor
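# A pared-down version of the tag parsing inside `tokenajson` above, using the
# same regexes to pull one flat <s_key>...</s_key> field. The tag name and
# token sequence are invented for the example.
import re as _re

_seq = '<s_total>42</s_total>'
_start = _re.search(r'<s_(.*?)>', _seq, _re.IGNORECASE)
_key = _start.group(1)
_content = _re.search(
    _re.escape(_start.group()) + '(.*?)' + _re.escape(f'</s_{_key}>'), _seq, _re.IGNORECASE
)
assert (_key, _content.group(1)) == ('total', '42')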
702
'''simple docstring''' _lowercase = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
44
0
'''simple docstring''' _lowercase = range(2, 20 + 1) _lowercase = [10**k for k in range(ks[-1] + 1)] _lowercase = {} def __UpperCamelCase ( a : Dict , a : List[str] , a : Dict , a : Any ) ->List[Any]: snake_case = sum(a_i[j] for j in range(a , len(a ) ) ) snake_case = sum(a_i[j] * base[j] for j in range(min(len(a ) , a ) ) ) snake_case , snake_case = 0, 0 snake_case = n - i snake_case = memo.get(a ) if sub_memo is not None: snake_case = sub_memo.get(a ) if jumps is not None and len(a ) > 0: # find and make the largest jump without going over snake_case = -1 for _k in range(len(a ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: snake_case = _k break if max_jump >= 0: snake_case , snake_case , snake_case = jumps[max_jump] # since the difference between jumps is cached, add c snake_case = diff + c for j in range(min(a , len(a ) ) ): snake_case , snake_case = divmod(a , 10 ) if new_c > 0: add(a , a , a ) else: snake_case = [] else: snake_case = {c: []} snake_case = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps snake_case , snake_case = next_term(a , k - 1 , i + dn , a ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead snake_case , snake_case = compute(a , a , i + dn , a ) diff += _diff dn += terms_jumped snake_case = sub_memo[c] # keep jumps sorted by # of terms skipped snake_case = 0 while j < len(a ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(a , (diff, dn, k) ) return (diff, dn) def __UpperCamelCase ( a : List[str] , a : List[str] , a : List[Any] , a : List[str] ) ->List[str]: if i >= n: return 0, i if k > len(a ): a_i.extend([0 for _ in range(k - len(a ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) snake_case = i snake_case , snake_case , snake_case = 0, 0, 0 for j in range(len(a ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 snake_case = ds_c + ds_b diff += addend snake_case = 0 for j in range(a ): snake_case = a_i[j] + addend snake_case , snake_case = divmod(a , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(a , a , a ) return diff, i - start_i def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] , a : List[Any] ) ->Any: for j in range(a , len(a ) ): snake_case = digits[j] + addend if s >= 10: snake_case , snake_case = divmod(a , 10 ) snake_case = addend // 10 + quotient else: snake_case = s snake_case = addend // 10 if addend == 0: break while addend > 0: snake_case , snake_case = divmod(a , 10 ) digits.append(a ) def __UpperCamelCase ( a : int = 10**15 ) ->int: snake_case = [1] snake_case = 1 snake_case = 0 while True: snake_case , snake_case = next_term(a , 20 , i + dn , a ) dn += terms_jumped if dn == n - i: break snake_case = 0 for j in range(len(a ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(f'{solution() = }')
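# A brute-force reference for the jump-accelerated solver above: iterate
# a(i+1) = a(i) + digitsum(a(i)) directly from a(1) = 1. Far too slow for
# n = 10**15, but handy for sanity-checking small n; the expected values
# below are computed by hand (1, 2, 4, 8, 16, ...).
def _digit_sum_sequence(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


assert [_digit_sum_sequence(i) for i in range(1, 6)] == [1, 2, 4, 8, 16]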
703
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = IFInpaintingSuperResolutionPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase ( self ) -> int: return self._get_superresolution_dummy_components() def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]: if str(A__ ).startswith('''mps''' ): snake_case = torch.manual_seed(A__ ) else: snake_case = torch.Generator(device=A__ ).manual_seed(A__ ) snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCamelCase ( self ) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCamelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_local() def UpperCamelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
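# The dummy inputs above are seeded per device; the core of that pattern in
# isolation is a `torch.Generator` with a fixed seed, which makes random
# tensor draws reproducible:
import torch as _torch

_g1 = _torch.Generator().manual_seed(0)
_g2 = _torch.Generator().manual_seed(0)
assert _torch.equal(_torch.rand(2, 2, generator=_g1), _torch.rand(2, 2, generator=_g2))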
44
0
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : def __init__( self , A__ , A__=13 , A__=32 , A__=2 , A__=3 , A__=16 , A__=[32, 64, 1_28] , A__=[1, 2, 1] , A__=[2, 2, 4] , A__=2 , A__=2.0 , A__=True , A__=0.0 , A__=0.0 , A__=0.1 , A__="gelu" , A__=False , A__=True , A__=0.0_2 , A__=1e-5 , A__=True , A__=None , A__=True , A__=10 , A__=8 , A__=["stage1", "stage2"] , A__=[1, 2] , ) -> Union[str, Any]: snake_case = parent snake_case = batch_size snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = embed_dim snake_case = hidden_sizes snake_case = depths snake_case = num_heads snake_case = window_size snake_case = mlp_ratio snake_case = qkv_bias snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = drop_path_rate snake_case = hidden_act snake_case = use_absolute_embeddings snake_case = patch_norm snake_case = layer_norm_eps snake_case = initializer_range snake_case = is_training snake_case = scope snake_case = use_labels snake_case = type_sequence_label_size snake_case = encoder_stride snake_case = out_features snake_case = out_indices def UpperCamelCase ( self ) -> Optional[int]: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> Optional[Any]: return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> str: snake_case = FocalNetModel(config=A__ ) model.to(A__ ) model.eval() snake_case = model(A__ ) snake_case = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) 
) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = FocalNetBackbone(config=A__ ) model.to(A__ ) model.eval() snake_case = model(A__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None snake_case = None snake_case = FocalNetBackbone(config=A__ ) model.to(A__ ) model.eval() snake_case = model(A__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Dict: snake_case = FocalNetForMaskedImageModeling(config=A__ ) model.to(A__ ) model.eval() snake_case = model(A__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case = 1 snake_case = FocalNetForMaskedImageModeling(A__ ) model.to(A__ ) model.eval() snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case = model(A__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[Any]: snake_case = self.type_sequence_label_size snake_case = FocalNetForImageClassification(A__ ) model.to(A__ ) model.eval() snake_case = model(A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case = 1 snake_case = FocalNetForImageClassification(A__ ) model.to(A__ ) model.eval() snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self ) -> int: snake_case = self.prepare_config_and_inputs() snake_case , snake_case , snake_case = config_and_inputs snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) _UpperCAmelCase = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase ( self ) -> Dict: snake_case = FocalNetModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , embed_dim=37 , has_text_modality=A__ ) def UpperCamelCase ( self ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self ) -> List[Any]: return def UpperCamelCase ( self ) -> Tuple: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> List[Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A__ ) def UpperCamelCase ( self ) -> Optional[int]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A__ ) def UpperCamelCase ( self ) -> Any: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @unittest.skip(reason='''FocalNet does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> List[Any]: pass @unittest.skip(reason='''FocalNet does not use feedforward chunking''' ) def UpperCamelCase ( self ) -> Optional[int]: pass def UpperCamelCase ( self ) -> int: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , nn.Linear ) ) def UpperCamelCase ( self ) -> List[str]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case = model_class(A__ ) snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ ) -> Optional[Any]: snake_case = model_class(A__ ) model.to(A__ ) model.eval() with torch.no_grad(): snake_case = model(**self._prepare_for_class(A__ , A__ ) ) snake_case = outputs.hidden_states snake_case = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(A__ ) , A__ ) # FocalNet has a different seq_length snake_case = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case = outputs.reshaped_hidden_states self.assertEqual(len(A__ ) , A__ ) snake_case , snake_case , snake_case , snake_case = reshaped_hidden_states[0].shape snake_case = ( reshaped_hidden_states[0].view(A__ , A__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCamelCase ( self ) -> Optional[int]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: snake_case = True self.check_hidden_states_output(A__ , A__ , A__ , A__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case = True 
self.check_hidden_states_output(A__ , A__ , A__ , A__ ) def UpperCamelCase ( self ) -> Dict: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = 3 snake_case = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: snake_case = True self.check_hidden_states_output(A__ , A__ , A__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case = True self.check_hidden_states_output(A__ , A__ , A__ , (padded_height, padded_width) ) @slow def UpperCamelCase ( self ) -> Optional[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = FocalNetModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) def UpperCamelCase ( self ) -> int: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() snake_case = _config_zero_init(A__ ) for model_class in self.all_model_classes: snake_case = model_class(config=A__ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class _lowercase ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ) -> Optional[int]: # TODO update organization return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Tuple: snake_case = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(A__ ) snake_case = self.default_image_processor snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) snake_case = image_processor(images=A__ , return_tensors='''pt''' ).to(A__ ) # forward pass with torch.no_grad(): snake_case = model(**A__ ) # verify the logits snake_case = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) snake_case = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(A__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 ) @require_torch class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = (FocalNetBackbone,) if is_torch_available() else () _UpperCAmelCase = FocalNetConfig _UpperCAmelCase = False def UpperCamelCase ( self ) -> Tuple: snake_case = FocalNetModelTester(self )
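# The shape bookkeeping behind the hidden-state checks above: with patch size
# p, an H x W image yields (H // p) * (W // p) patch tokens.
def _num_patch_tokens(height: int, width: int, patch: int) -> int:
    return (height // patch) * (width // patch)


assert _num_patch_tokens(32, 32, 2) == 256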
704
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _lowercase = logging.get_logger(__name__) class _lowercase ( __a ): def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: snake_case = feature_size snake_case = sampling_rate snake_case = padding_value snake_case = kwargs.pop('''padding_side''' , '''right''' ) snake_case = kwargs.pop('''return_attention_mask''' , A__ ) super().__init__(**A__ ) def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): snake_case = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) snake_case = processed_features[self.model_input_names[0]] snake_case = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(A__ ) == 0: if return_attention_mask: snake_case = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch snake_case = required_input[0] if isinstance(A__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. snake_case = 0 while len(required_input[index] ) == 0: index += 1 if index < len(A__ ): snake_case = required_input[index][0] if return_tensors is None: if is_tf_tensor(A__ ): snake_case = '''tf''' elif is_torch_tensor(A__ ): snake_case = '''pt''' elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ): snake_case = '''np''' else: raise ValueError( F"""type of {first_element} unknown: {type(A__ )}. 
""" '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): snake_case = to_numpy(A__ ) else: snake_case = [to_numpy(A__ ) for v in value] # Convert padding_strategy in PaddingStrategy snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ ) snake_case = processed_features[self.model_input_names[0]] snake_case = len(A__ ) if not all(len(A__ ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) snake_case = [] for i in range(A__ ): snake_case = {k: v[i] for k, v in processed_features.items()} # truncation snake_case = self._truncate( A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , ) truncated_inputs.append(A__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) snake_case = PaddingStrategy.MAX_LENGTH snake_case = {} for i in range(A__ ): # padding snake_case = self._pad( truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , ) for key, value in outputs.items(): if key not in batch_outputs: snake_case = [] if value.dtype is np.dtype(np.floataa ): snake_case = value.astype(np.floataa ) batch_outputs[key].append(A__ ) return BatchFeature(A__ , tensor_type=A__ ) def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict: snake_case = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: snake_case = len(A__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: snake_case = np.ones(len(A__ ) , dtype=np.intaa ) if needs_to_be_padded: snake_case = max_length - len(A__ ) if self.padding_side == "right": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (0, difference) ) snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) snake_case = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * 
pad_to_multiple_of snake_case = len(A__ ) > max_length if needs_to_be_truncated: snake_case = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: snake_case = processed_features['''attention_mask'''][:max_length] return processed_features def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]: # Get padding strategy if padding is not False: if padding is True: snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(A__ , A__ ): snake_case = PaddingStrategy(A__ ) elif isinstance(A__ , A__ ): snake_case = padding else: snake_case = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
44
0
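A minimal, self-contained sketch of the right-side padding pattern that the feature-extractor `_pad` code above implements, assuming plain 1-D numpy feature arrays; `pad_right` is a hypothetical helper name introduced only for this illustration, not part of the library.

import numpy as np

def pad_right(values, max_length, padding_value=0.0):
    # Pad a 1-D feature array on the right to max_length and build the
    # matching attention mask (1 = real value, 0 = padding), mirroring the
    # padding_side == "right" branch above.
    difference = max_length - len(values)
    attention_mask = np.pad(np.ones(len(values), dtype=np.int32), (0, difference))
    padded = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return padded, attention_mask

# Example: pad a small "batch" to its longest sequence (PaddingStrategy.LONGEST).
batch = [np.array([0.1, 0.2, 0.3]), np.array([0.5])]
max_len = max(len(v) for v in batch)
for values in batch:
    padded, mask = pad_right(values, max_len)
    print(padded, mask)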
'''simple docstring''' import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def __UpperCamelCase ( a : Any , a : List[str]=() , a : Tuple=None , a : List[str]="no" , a : Dict="29500" ) ->int: snake_case = False snake_case = False if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ): snake_case = True elif "IPython" in sys.modules: snake_case = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() ) try: snake_case = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( f"""Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , a ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside ''' '''your training function. Restart your notebook and make sure no cells initialize an ''' '''`Accelerator`.''' ) if num_processes is None: snake_case = 8 snake_case = PrepareForLaunch(a , distributed_type='''TPU''' ) print(f"""Launching training on {num_processes} TPU cores.""" ) xmp.spawn(a , args=a , nprocs=a , start_method='''fork''' ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on one CPU.''' ) function(*a ) else: if num_processes is None: raise ValueError( '''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized ''' '''inside your training function. Restart your notebook and make sure no cells initialize an ''' '''`Accelerator`.''' ) if torch.cuda.is_initialized(): raise ValueError( '''To launch a multi-GPU training from your notebook, you need to avoid running any instruction ''' '''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA ''' '''function.''' ) # torch.distributed will expect a few environment variables to be here. We set the ones common to each # process here (the other ones will be set by the launcher). with patch_environment( world_size=a , master_addr='''127.0.0.1''' , master_port=a , mixed_precision=a ): snake_case = PrepareForLaunch(a , distributed_type='''MULTI_GPU''' ) print(f"""Launching training on {num_processes} GPUs.""" ) try: start_processes(a , args=a , nprocs=a , start_method='''fork''' ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( '''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. ''' '''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. ''' '''Please review your imports and test them when running the `notebook_launcher()` to identify ''' '''which one is problematic.''' ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available(): snake_case = '''1''' print('''Launching training on MPS.''' ) elif torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on CPU.''' ) function(*a ) def __UpperCamelCase ( a : Dict , a : Optional[int]=() , a : Dict=2 ) ->int: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variables to be here. We set the ones common to each # process here (the other ones will be set by the launcher). with patch_environment( world_size=a , master_addr='''127.0.0.1''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ): snake_case = PrepareForLaunch(a , debug=a ) start_processes(a , args=a , nprocs=a , start_method='''fork''' )
705
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowercase ( yaml.SafeLoader ): def UpperCamelCase ( self , A__ ) -> List[str]: snake_case = [self.constructed_objects[key_node] for key_node, _ in node.value] snake_case = [tuple(A__ ) if isinstance(A__ , A__ ) else key for key in keys] snake_case = Counter(A__ ) snake_case = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def UpperCamelCase ( self , A__ , A__=False ) -> List[Any]: snake_case = super().construct_mapping(A__ , deep=A__ ) self._check_no_duplicates_on_constructed_node(A__ ) return mapping def __UpperCamelCase ( a : str ) ->Tuple[Optional[str], str]: snake_case = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: snake_case = full_content[1:].index('''---''' ) + 1 snake_case = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(a ) class _lowercase ( __a ): # class attributes _UpperCAmelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case , snake_case = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(A__ ) else: return cls() def UpperCamelCase ( self , A__ ) -> str: if path.exists(): with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case = readme_file.read() else: snake_case = None snake_case = self._to_readme(A__ ) with open(A__ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(A__ ) def UpperCamelCase ( self , A__ = None ) -> str: if readme_content is not None: snake_case , snake_case = _split_yaml_from_readme(A__ ) snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": snake_case = yaml.load(A__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields snake_case = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**A__ ) def UpperCamelCase ( self ) -> str: return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=A__ , allow_unicode=A__ , encoding='''utf-8''' , ).decode('''utf-8''' ) _lowercase = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 
'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') args = ap.parse_args() readme_filepath = Path(args.readme_filepath) dataset_metadata = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
44
0
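For reference, a standalone demo of the README front-matter split performed by the `_split_yaml_from_readme` helper above: the YAML block sits between two "---" lines at the top of the file. The sample README contents are made up purely for illustration.

readme = """---
language: en
license: mit
---
# My dataset

Some description.
"""

full_content = readme.splitlines()
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
    # index of the closing "---", counted from the line after the opening one
    sep_idx = full_content[1:].index("---") + 1
    yaml_block = "\n".join(full_content[1:sep_idx])
    content = "\n".join(full_content[sep_idx + 1 :])
    print(yaml_block)  # language: en / license: mit
    print(content)     # the markdown body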
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _lowercase = logging.get_logger(__name__) class _lowercase ( __a ): def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: snake_case = feature_size snake_case = sampling_rate snake_case = padding_value snake_case = kwargs.pop('''padding_side''' , '''right''' ) snake_case = kwargs.pop('''return_attention_mask''' , A__ ) super().__init__(**A__ ) def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): snake_case = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) snake_case = processed_features[self.model_input_names[0]] snake_case = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(A__ ) == 0: if return_attention_mask: snake_case = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch snake_case = required_input[0] if isinstance(A__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. snake_case = 0 while len(required_input[index] ) == 0: index += 1 if index < len(A__ ): snake_case = required_input[index][0] if return_tensors is None: if is_tf_tensor(A__ ): snake_case = '''tf''' elif is_torch_tensor(A__ ): snake_case = '''pt''' elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ): snake_case = '''np''' else: raise ValueError( F"""type of {first_element} unknown: {type(A__ )}. 
""" '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): snake_case = to_numpy(A__ ) else: snake_case = [to_numpy(A__ ) for v in value] # Convert padding_strategy in PaddingStrategy snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ ) snake_case = processed_features[self.model_input_names[0]] snake_case = len(A__ ) if not all(len(A__ ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) snake_case = [] for i in range(A__ ): snake_case = {k: v[i] for k, v in processed_features.items()} # truncation snake_case = self._truncate( A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , ) truncated_inputs.append(A__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) snake_case = PaddingStrategy.MAX_LENGTH snake_case = {} for i in range(A__ ): # padding snake_case = self._pad( truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , ) for key, value in outputs.items(): if key not in batch_outputs: snake_case = [] if value.dtype is np.dtype(np.floataa ): snake_case = value.astype(np.floataa ) batch_outputs[key].append(A__ ) return BatchFeature(A__ , tensor_type=A__ ) def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict: snake_case = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: snake_case = len(A__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: snake_case = np.ones(len(A__ ) , dtype=np.intaa ) if needs_to_be_padded: snake_case = max_length - len(A__ ) if self.padding_side == "right": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (0, difference) ) snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) snake_case = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * 
pad_to_multiple_of snake_case = len(A__ ) > max_length if needs_to_be_truncated: snake_case = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: snake_case = processed_features['''attention_mask'''][:max_length] return processed_features def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]: # Get padding strategy if padding is not False: if padding is True: snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(A__ , A__ ): snake_case = PaddingStrategy(A__ ) elif isinstance(A__ , A__ ): snake_case = padding else: snake_case = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
706
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = CodeGenTokenizer _UpperCAmelCase = CodeGenTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = {'''add_prefix_space''': True} _UpperCAmelCase = False def UpperCamelCase ( self ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] snake_case = dict(zip(A__ , range(len(A__ ) ) ) ) snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case = {'''unk_token''': '''<unk>'''} snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A__ ) ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = '''lower newer''' snake_case = '''lower newer''' return input_text, output_text def UpperCamelCase ( self ) -> List[Any]: snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case = '''lower newer''' snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) self.assertListEqual(A__ , A__ ) snake_case = tokens + [tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return snake_case = self.get_tokenizer() snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = '''lower newer''' # Testing tokenization snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids without special tokens snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids with special tokens snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = tokenizer.encode(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # Testing the unknown token snake_case = tokens + [rust_tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] 
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCamelCase ( self , A__=15 ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) # Pair input self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) def UpperCamelCase ( self ) -> Tuple: snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input looooooooong''', '''This is a simple input'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] snake_case = tokenizer.pad_token_id snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in 
out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def UpperCamelCase ( self ) -> str: snake_case = '''$$$''' snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ ) snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = tokenizer.bos_token_id snake_case = tokenizer(A__ ) snake_case = tokenizer(A__ ) self.assertEqual(out_s.input_ids[0] , A__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) snake_case = tokenizer.decode(out_s.input_ids ) snake_case = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCamelCase ( self ) -> Any: snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' snake_case = '''\nif len_a > len_b: result = a\nelse: result = b''' snake_case = tokenizer.encode(A__ ) snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: pass
44
0
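A small worked example of the `pad_to_multiple_of` rounding rule that appears twice in the padding/truncation code above; `round_up_to_multiple` is a hypothetical name given to that one expression for this sketch.

def round_up_to_multiple(max_length: int, multiple: int) -> int:
    # Mirrors the guard and expression used above: only adjust when
    # max_length is not already an exact multiple, then round up.
    if max_length % multiple != 0:
        max_length = ((max_length // multiple) + 1) * multiple
    return max_length

assert round_up_to_multiple(33, 8) == 40  # rounds up to the next multiple of 8
assert round_up_to_multiple(32, 8) == 32  # exact multiples are left untouched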
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : str , a : Tuple=False , a : Tuple=False , a : Union[str, Any]=False ) ->Any: snake_case = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), 
('''vqa_classifier.0.bias''', '''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def __UpperCamelCase ( a : int , a : Any ) ->str: for i in range(config.num_hidden_layers ): snake_case = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" ) snake_case = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[ : config.hidden_size, : ] snake_case = in_proj_bias[: config.hidden_size] snake_case = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case = in_proj_weight[ -config.hidden_size :, : ] snake_case = in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( a : int ) ->List[str]: snake_case = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Dict , a : List[str] , a : List[Any] ) ->List[str]: snake_case = dct.pop(a ) snake_case = val @torch.no_grad() def __UpperCamelCase ( a : List[str] , a : List[Any] ) ->Optional[Any]: snake_case = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=a ) snake_case = False snake_case = False snake_case = False snake_case = False if "vqa" in checkpoint_url: snake_case = True snake_case = 3129 snake_case = '''huggingface/label-files''' snake_case = '''vqa2-id2label.json''' snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = ViltForQuestionAnswering(a ) elif "nlvr" in checkpoint_url: snake_case = True snake_case = 2 snake_case = {0: '''False''', 1: '''True'''} snake_case = {v: k for k, v in config.idalabel.items()} snake_case = 3 snake_case = ViltForImagesAndTextClassification(a ) elif "irtr" in checkpoint_url: snake_case = True snake_case = ViltForImageAndTextRetrieval(a ) elif "mlm_itm" in checkpoint_url: snake_case = True snake_case = ViltForMaskedLM(a ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys snake_case = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )['''state_dict'''] snake_case = create_rename_keys(a , a , a , a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_q_k_v(a , a ) if mlm_model or irtr_model: snake_case = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] for k in ignore_keys: state_dict.pop(a , a ) # load state dict into HuggingFace model model.eval() if mlm_model: snake_case , snake_case = model.load_state_dict(a , strict=a ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(a ) # Define 
processor snake_case = ViltImageProcessor(size=384 ) snake_case = BertTokenizer.from_pretrained('''bert-base-uncased''' ) snake_case = ViltProcessor(a , a ) # Forward pass on example inputs (image + text) if nlvr_model: snake_case = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=a ).raw ) snake_case = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=a ).raw ) snake_case = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) snake_case = processor(a , a , return_tensors='''pt''' ) snake_case = processor(a , a , return_tensors='''pt''' ) snake_case = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: snake_case = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=a ).raw ) if mlm_model: snake_case = '''a bunch of [MASK] laying on a [MASK].''' else: snake_case = '''How many cats are there?''' snake_case = processor(a , a , return_tensors='''pt''' ) snake_case = model(**a ) # Verify outputs if mlm_model: snake_case = torch.Size([1, 11, 3_0522] ) snake_case = torch.tensor([-12.5061, -12.5123, -12.5174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 ) # verify masked token prediction equals "cats" snake_case = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: snake_case = torch.Size([1, 3129] ) snake_case = torch.tensor([-15.9495, -18.1472, -10.3041] ) assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 ) # verify vqa prediction equals "2" snake_case = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: snake_case = torch.Size([1, 2] ) snake_case = torch.tensor([-2.8721, 2.1291] ) assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) assert outputs.logits.shape == expected_shape Path(a ).mkdir(exist_ok=a ) print(f"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(a ) processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _lowercase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
707
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]: snake_case = parent snake_case = batch_size snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = is_training snake_case = use_labels snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = type_sequence_label_size snake_case = initializer_range snake_case = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case = (image_size // patch_size) ** 2 snake_case = num_patches + 1 def UpperCamelCase ( self ) -> int: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> int: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = TFViTModel(config=A__ ) snake_case = model(A__ , training=A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) snake_case = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]: snake_case = self.type_sequence_label_size snake_case = TFViTForImageClassification(A__ ) snake_case = model(A__ , labels=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case = 1 snake_case = TFViTForImageClassification(A__ ) snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.prepare_config_and_inputs() snake_case , snake_case , snake_case = config_and_inputs snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () _UpperCAmelCase = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase ( self ) -> List[Any]: snake_case = TFViTModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> int: pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> str: pass def UpperCamelCase ( self ) -> Union[str, Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) ) def UpperCamelCase ( self ) -> List[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def UpperCamelCase ( self ) -> Any: snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(A__ ) def __UpperCamelCase ( ) ->Any: snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ) -> Optional[int]: return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Dict: snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) snake_case = self.default_image_processor snake_case = prepare_img() snake_case = image_processor(images=A__ , 
return_tensors='''tf''' ) # forward pass snake_case = model(**A__ ) # verify the logits snake_case = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
44
0
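A numpy sketch of the fused query/key/value split that the `read_in_q_k_v`-style function performs in the ViLT conversion script above; the hidden size and weight values here are invented purely for illustration.

import numpy as np

hidden_size = 4  # made-up size for the demo
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)

# The checkpoint stores query, key and value weights stacked along the first
# axis of a single "qkv" matrix, so each projection is a contiguous row slice.
query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : 2 * hidden_size, :]
value = in_proj_weight[-hidden_size:, :]

assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)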
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
708
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _lowercase = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def __UpperCamelCase ( a : Dict=True ) ->str: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) ) class _lowercase ( __a ): _UpperCAmelCase = None _UpperCAmelCase = None def UpperCamelCase ( self , A__ , A__ ) -> str: with TemporaryDirectory() as tmp_dir: snake_case = dataset_module_factory(A__ , cache_dir=A__ ) snake_case = import_main_class(dataset_module.module_path , dataset=A__ ) snake_case = builder_cls( cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , ) snake_case = '''/'''.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ), config.DATASET_INFO_FILENAME, ] ) snake_case = cached_path(A__ , cache_dir=A__ ) self.assertTrue(os.path.exists(A__ ) ) @pytest.mark.integration def __UpperCamelCase ( a : List[str] ) ->Any: snake_case = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple''' snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam snake_case = None builder_instance.download_and_prepare() snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def __UpperCamelCase ( a : Any ) ->Union[str, Any]: snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path , dataset=a ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(a , a ) assert "train" in ds assert isinstance(ds['''train'''] , a ) assert next(iter(ds['''train'''] ) )
44
0
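A stripped-down sketch of the lazy-import pattern behind the `_LazyModule` usage above, assuming only that exported symbols map to submodules; the real implementation also handles module specs and `TYPE_CHECKING`, so treat this as an illustration rather than the library API.

import importlib

class LazyModule:
    # Nothing is imported at construction time; the defining submodule is
    # loaded only when one of its exported symbols is first accessed.
    def __init__(self, package_name, import_structure):
        self._package_name = package_name
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module(f"{self._package_name}.{self._symbol_to_module[symbol]}")
        return getattr(module, symbol)

# Demo with a stdlib package: collections.abc is imported on first access only.
lazy = LazyModule("collections", {"abc": ["Iterable", "Mapping"]})
print(lazy.Iterable)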
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowercase ( unittest.TestCase ): def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , A__=None , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , A__=False , ) -> int: snake_case = size if size is not None else {'''height''': 20, '''width''': 20} snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size snake_case = do_center_crop snake_case = crop_size snake_case = do_normalize snake_case = image_mean snake_case = image_std snake_case = do_reduce_labels def UpperCamelCase ( self ) -> Optional[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __UpperCamelCase ( ) ->Tuple: snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) snake_case = Image.open(dataset[0]['''file'''] ) snake_case = Image.open(dataset[1]['''file'''] ) return image, map def __UpperCamelCase ( ) ->List[str]: snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) snake_case = Image.open(ds[0]['''file'''] ) snake_case = Image.open(ds[1]['''file'''] ) snake_case = Image.open(ds[2]['''file'''] ) snake_case = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = BeitImageProcessor if is_vision_available() else None def UpperCamelCase ( self ) -> Optional[int]: snake_case = BeitImageProcessingTester(self ) @property def UpperCamelCase ( self ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ) -> List[Any]: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A__ , '''do_resize''' ) ) self.assertTrue(hasattr(A__ , '''size''' ) ) self.assertTrue(hasattr(A__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(A__ , '''center_crop''' ) ) self.assertTrue(hasattr(A__ , '''do_normalize''' ) ) self.assertTrue(hasattr(A__ , '''image_mean''' ) ) self.assertTrue(hasattr(A__ , '''image_std''' ) ) def UpperCamelCase ( self ) -> Dict: snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , A__ ) snake_case = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=A__ ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) 
self.assertEqual(image_processor.do_reduce_labels , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: pass def UpperCamelCase ( self ) -> Optional[Any]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ ) for image in image_inputs: self.assertIsInstance(A__ , Image.Image ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase ( self ) -> Optional[int]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , np.ndarray ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase ( self ) -> List[Any]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , torch.Tensor ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase ( self ) -> Union[str, Any]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ ) snake_case = [] for image in image_inputs: self.assertIsInstance(A__ , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input snake_case = image_processing(image_inputs[0] , maps[0] , 
return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched snake_case = image_processing(A__ , A__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) snake_case , snake_case = prepare_semantic_single_inputs() snake_case = image_processing(A__ , A__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) snake_case , snake_case = prepare_semantic_batch_inputs() snake_case = image_processing(A__ , A__ , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def UpperCamelCase ( self ) -> Any: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 snake_case , snake_case = prepare_semantic_single_inputs() snake_case = image_processing(A__ , A__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) snake_case = True snake_case = image_processing(A__ , A__ , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
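A hedged, self-contained sketch of the processor the tests above exercise. The constructor arguments mirror the tester defaults; the random array merely stands in for prepare_image_inputs, and the expected shape follows the resize-then-center-crop pipeline:

import numpy as np
from transformers import BeitImageProcessor

image_processor = BeitImageProcessor(
    size={"height": 20, "width": 20}, crop_size={"height": 18, "width": 18}
)
image = np.random.randint(0, 256, (30, 30, 3), dtype=np.uint8)  # H x W x C
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])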
709
'''simple docstring''' def euclidean_gcd ( a : int , b : int ) ->int: while b: a , b = b, a % b return a def euclidean_gcd_recursive ( a : int , b : int ) ->int: return a if b == 0 else euclidean_gcd_recursive(b , a % b ) def main ( ) ->None: print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
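A minimal usage sketch (not part of the corpus row): both helpers above should agree on any pair of non-negative integers.

assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == euclidean_gcd(48, 18)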
44
0
'''simple docstring''' import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class _lowercase ( __a ): _UpperCAmelCase = '''MCTCTFeatureExtractor''' _UpperCAmelCase = '''AutoTokenizer''' def __init__( self , A__ , A__ ) -> Union[str, Any]: super().__init__(A__ , A__ ) snake_case = self.feature_extractor snake_case = False def __call__( self , *A__ , **A__ ) -> Tuple: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A__ , **A__ ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) snake_case = kwargs.pop('''raw_speech''' ) else: snake_case = kwargs.pop('''audio''' , A__ ) snake_case = kwargs.pop('''sampling_rate''' , A__ ) snake_case = kwargs.pop('''text''' , A__ ) if len(A__ ) > 0: snake_case = args[0] snake_case = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ ) if text is not None: snake_case = self.tokenizer(A__ , **A__ ) if text is None: return inputs elif audio is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Dict: # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*A__ , **A__ ) snake_case = kwargs.pop('''input_features''' , A__ ) snake_case = kwargs.pop('''labels''' , A__ ) if len(A__ ) > 0: snake_case = args[0] snake_case = args[1:] if input_features is not None: snake_case = self.feature_extractor.pad(A__ , *A__ , **A__ ) if labels is not None: snake_case = self.tokenizer.pad(A__ , **A__ ) if labels is None: return input_features elif input_features is None: return labels else: snake_case = labels['''input_ids'''] return input_features def UpperCamelCase ( self , *A__ , **A__ ) -> Dict: return self.tokenizer.decode(*A__ , **A__ ) @contextmanager def UpperCamelCase ( self ) -> Any: warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) snake_case = True snake_case = self.tokenizer yield snake_case = self.feature_extractor snake_case = False
710
'''simple docstring''' import argparse import copy def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple: snake_case = {} with open(a ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[1], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[0], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __UpperCamelCase ( a : Dict , a : Tuple ) ->int: with open(a ) as f: snake_case = f.read(1 ) snake_case = start_node snake_case = [] snake_case = start_node snake_case = 0 while visiting not in first_solution: snake_case = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(a ) and k[0] not in first_solution: snake_case = k[1] snake_case = k[0] first_solution.append(a ) snake_case = distance_of_first_solution + int(a ) snake_case = best_node first_solution.append(a ) snake_case = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def __UpperCamelCase ( a : Optional[int] , a : str ) ->str: snake_case = [] for n in solution[1:-1]: snake_case = solution.index(a ) for kn in solution[1:-1]: snake_case = solution.index(a ) if n == kn: continue snake_case = copy.deepcopy(a ) snake_case = kn snake_case = n snake_case = 0 for k in _tmp[:-1]: snake_case = _tmp[_tmp.index(a ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case = distance + int(i[1] ) _tmp.append(a ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __UpperCamelCase ( a : Any , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] ) ->List[Any]: snake_case = 1 snake_case = first_solution snake_case = [] snake_case = distance_of_first_solution snake_case = solution while count <= iters: snake_case = find_neighborhood(a , a ) snake_case = 0 snake_case = neighborhood[index_of_best_solution] snake_case = len(a ) - 1 snake_case = False while not found: snake_case = 0 while i < len(a ): if best_solution[i] != solution[i]: snake_case = best_solution[i] snake_case = solution[i] break snake_case = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case = True snake_case = best_solution[:-1] snake_case = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case = cost snake_case = solution else: snake_case = index_of_best_solution + 1 snake_case = neighborhood[index_of_best_solution] if len(a ) >= size: tabu_list.pop(0 ) snake_case = count + 1 return best_solution_ever, best_cost def __UpperCamelCase ( a : Union[str, Any]=None ) ->Optional[Any]: snake_case = generate_neighbours(args.File ) snake_case , snake_case = generate_first_solution( args.File , a ) snake_case , snake_case = tabu_search( a , a , a , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" 
) if __name__ == "__main__": _lowercase = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
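For reference, the neighbour-generation helper above parses its input as a whitespace-separated edge list, one node_a node_b distance triple per line; note that the first character of the file is read back as the start node, so single-character node names are assumed. A hypothetical input file:

a b 20
a c 18
b c 10
c d 22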
44
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { 'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ 'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimesformerModel', 'TimesformerForVideoClassification', 'TimesformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
711
'''simple docstring''' from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
44
0
def find_min ( arr : list ) ->int: n = len(arr ) s = sum(arr ) dp = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): dp[i][0] = True for i in range(1 , s + 1 ): dp[0][i] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): dp[i][j] = dp[i - 1][j] if arr[i - 1] <= j: dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: diff = s - 2 * j break return diff
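An illustrative check of the subset-sum DP above (using the cleaned-up name find_min): for [1, 6, 11, 5] the best split is {1, 5, 6} versus {11}, so the minimum difference is 1.

assert find_min([1, 6, 11, 5]) == 1
assert find_min([3, 1, 4, 2, 2, 1]) == 1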
712
'''simple docstring''' from ...processing_utils import ProcessorMixin class _lowercase ( __a ): _UpperCAmelCase = '''WhisperFeatureExtractor''' _UpperCAmelCase = '''WhisperTokenizer''' def __init__( self , A__ , A__ ) -> Optional[Any]: super().__init__(A__ , A__ ) snake_case = self.feature_extractor snake_case = False def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]: return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ ) def __call__( self , *A__ , **A__ ) -> Dict: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A__ , **A__ ) snake_case = kwargs.pop('''audio''' , A__ ) snake_case = kwargs.pop('''sampling_rate''' , A__ ) snake_case = kwargs.pop('''text''' , A__ ) if len(A__ ) > 0: snake_case = args[0] snake_case = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ ) if text is not None: snake_case = self.tokenizer(A__ , **A__ ) if text is None: return inputs elif audio is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> str: return self.tokenizer.decode(*A__ , **A__ ) def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]: return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
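A hedged round-trip sketch for the processor above; the checkpoint id and feature shape follow the public openai/whisper-tiny model, and the zero waveform just keeps the example self-contained:

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
inputs = processor(audio=np.zeros(16_000), sampling_rate=16_000, return_tensors="pt")
print(inputs.input_features.shape)  # (1, 80, 3000): Whisper's log-mel features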
44
0
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _lowercase = threading.Lock() _lowercase = None _lowercase = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } _lowercase = logging.WARNING _lowercase = True def __UpperCamelCase ( ) ->str: snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ', '.join(log_levels.keys() ) }""" ) return _default_log_level def __UpperCamelCase ( ) ->str: return __name__.split('''.''' )[0] def __UpperCamelCase ( ) ->logging.Logger: return logging.getLogger(_get_library_name() ) def __UpperCamelCase ( ) ->None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return snake_case = logging.StreamHandler() # Set sys.stderr as stream. snake_case = sys.stderr.flush # Apply our default configuration to the library root logger. snake_case = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) snake_case = False def __UpperCamelCase ( ) ->None: global _default_handler with _lock: if not _default_handler: return snake_case = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) snake_case = None def __UpperCamelCase ( ) ->str: return log_levels def __UpperCamelCase ( a : Optional[str] = None ) ->logging.Logger: if name is None: snake_case = _get_library_name() _configure_library_root_logger() return logging.getLogger(a ) def __UpperCamelCase ( ) ->int: _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def __UpperCamelCase ( a : int ) ->None: _configure_library_root_logger() _get_library_root_logger().setLevel(a ) def __UpperCamelCase ( ) ->Any: return set_verbosity(a ) def __UpperCamelCase ( ) ->int: return set_verbosity(a ) def __UpperCamelCase ( ) ->Tuple: return set_verbosity(a ) def __UpperCamelCase ( ) ->Union[str, Any]: return set_verbosity(a ) def __UpperCamelCase ( ) ->None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def __UpperCamelCase ( ) ->None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def __UpperCamelCase ( a : logging.Handler ) ->None: _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(a ) def __UpperCamelCase ( a : logging.Handler ) ->None: _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(a ) def __UpperCamelCase ( ) ->None: _configure_library_root_logger() snake_case = False def __UpperCamelCase ( ) ->None: _configure_library_root_logger() snake_case = True def __UpperCamelCase ( ) ->None: snake_case = _get_library_root_logger().handlers for handler in handlers: snake_case = 
logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' ) handler.setFormatter(a ) def __UpperCamelCase ( ) ->None: snake_case = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(a ) def __UpperCamelCase ( self : List[str] , *a : List[Any] , **a : Union[str, Any] ) ->Optional[Any]: snake_case = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , a ) if no_advisory_warnings: return self.warning(*a , **a ) _lowercase = warning_advice @functools.lru_cache(a ) def __UpperCamelCase ( self : Optional[Any] , *a : str , **a : Dict ) ->List[str]: self.warning(*a , **a ) _lowercase = warning_once class _lowercase : def __init__( self , *A__ , **A__ ) -> List[Any]: # pylint: disable=unused-argument snake_case = args[0] if args else None def __iter__( self ) -> int: return iter(self._iterator ) def __getattr__( self , A__ ) -> Union[str, Any]: def empty_fn(*A__ , **A__ ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> Optional[Any]: return self def __exit__( self , A__ , A__ , A__ ) -> Dict: return class _lowercase : def __call__( self , *A__ , **A__ ) -> int: if _tqdm_active: return tqdm_lib.tqdm(*A__ , **A__ ) else: return EmptyTqdm(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> List[Any]: snake_case = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*A__ , **A__ ) def UpperCamelCase ( self ) -> Optional[Any]: if _tqdm_active: return tqdm_lib.tqdm.get_lock() _lowercase = _tqdm_cls() def __UpperCamelCase ( ) ->bool: global _tqdm_active return bool(_tqdm_active ) def __UpperCamelCase ( ) ->List[Any]: global _tqdm_active snake_case = True hf_hub_utils.enable_progress_bars() def __UpperCamelCase ( ) ->Any: global _tqdm_active snake_case = False hf_hub_utils.disable_progress_bars()
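A short usage sketch, assuming the module above is exposed as transformers.utils.logging as in the upstream library:

from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
logger.info("verbosity is now INFO")
logger.warning_once("emitted once per process via the lru_cache wrapper above")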
713
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _lowercase ( __a ): _UpperCAmelCase = '''char''' _UpperCAmelCase = '''bpe''' _UpperCAmelCase = '''wp''' _lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _lowercase ( __a ): _UpperCAmelCase = ['''image_processor''', '''char_tokenizer'''] _UpperCAmelCase = '''ViTImageProcessor''' _UpperCAmelCase = '''MgpstrTokenizer''' def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]: snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A__ , ) snake_case = kwargs.pop('''feature_extractor''' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) snake_case = tokenizer snake_case = AutoTokenizer.from_pretrained('''gpt2''' ) snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(A__ , A__ ) def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None: snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ ) if text is None: return inputs elif images is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , A__ ) -> Dict: snake_case , snake_case , snake_case = sequences snake_case = char_preds.size(0 ) snake_case , snake_case = self._decode_helper(A__ , '''char''' ) snake_case , snake_case = self._decode_helper(A__ , '''bpe''' ) snake_case , snake_case = self._decode_helper(A__ , '''wp''' ) snake_case = [] snake_case = [] for i in range(A__ ): snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] snake_case = scores.index(max(A__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) snake_case = {} snake_case = final_strs snake_case = final_scores snake_case = char_strs snake_case = bpe_strs snake_case = wp_strs return out def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]: if format == DecodeType.CHARACTER: snake_case = self.char_decode snake_case = 1 snake_case = '''[s]''' elif format == DecodeType.BPE: snake_case = self.bpe_decode snake_case = 2 snake_case = '''#''' elif format == DecodeType.WORDPIECE: snake_case = self.wp_decode snake_case = 1_02 snake_case = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) snake_case , snake_case = [], [] snake_case = pred_logits.size(0 ) snake_case = pred_logits.size(1 ) snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ ) snake_case = preds_index.view(-1 , A__ )[:, 1:] snake_case = decoder(A__ ) snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 ) snake_case = preds_max_prob[:, 1:] for index in range(A__ ): snake_case = preds_str[index].find(A__ ) snake_case = preds_str[index][:pred_eos] 
snake_case = preds_index[index].cpu().tolist() snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1 snake_case = preds_max_prob[index][: pred_eos_index + 1] snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A__ ) conf_scores.append(A__ ) return dec_strs, conf_scores def UpperCamelCase ( self , A__ ) -> int: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )] return decode_strs def UpperCamelCase ( self , A__ ) -> List[str]: return self.bpe_tokenizer.batch_decode(A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )] return decode_strs
44
0
'''simple docstring''' import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def __UpperCamelCase ( *a : List[str] , a : Optional[Union[Dict, Any]] = None , a : Union[str, Any]=True , a : Dict=2 ) ->Any: from .. import __version__ snake_case = take_from snake_case = () if not isinstance(args[0] , a ): snake_case = (args,) for attribute, version_name, message in args: if version.parse(version.parse(a ).base_version ) >= version.parse(a ): raise ValueError( f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" f""" version {__version__} is >= {version_name}""" ) snake_case = None if isinstance(a , a ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(a ),) snake_case = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(a , a ): values += (getattr(a , a ),) snake_case = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: snake_case = f"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: snake_case = warning + ''' ''' if standard_warn else '''''' warnings.warn(warning + message , a , stacklevel=a ) if isinstance(a , a ) and len(a ) > 0: snake_case = inspect.getouterframes(inspect.currentframe() )[1] snake_case = call_frame.filename snake_case = call_frame.lineno snake_case = call_frame.function snake_case , snake_case = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(a ) == 0: return elif len(a ) == 1: return values[0] return values
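An illustrative call pattern for the helper above, assuming it keeps its upstream name deprecate; the argument name and removal version are placeholders:

from diffusers.utils import deprecate

kwargs = {"old_arg": 3}
value = deprecate("old_arg", "999.0.0", "use `new_arg` instead", take_from=kwargs)
# emits a deprecation warning and pops the key, so value == 3 and kwargs == {}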
714
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowercase , _lowercase , _lowercase = False, False, False @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = "dict" _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a ) def __call__( self ) -> Optional[Any]: return self.pa_type def UpperCamelCase ( self , A__ ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(A__ , A__ ): return {"bytes": None, "path": value} elif isinstance(A__ , A__ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes snake_case = BytesIO() sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 snake_case = BytesIO(bytes() ) sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def UpperCamelCase ( self , A__ , A__ = None ) -> dict: if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: snake_case = token_per_repo_id or {} snake_case = path.split('''::''' )[-1] try: snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id'''] snake_case = token_per_repo_id[repo_id] except (ValueError, KeyError): snake_case = None with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f: snake_case , snake_case = sf.read(A__ ) else: snake_case , snake_case = sf.read(A__ ) snake_case = array.T if self.mono: snake_case = librosa.to_mono(A__ ) if self.sampling_rate and self.sampling_rate != sampling_rate: snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate ) snake_case = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def UpperCamelCase ( self , A__ ) -> pa.StructArray: if pa.types.is_string(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: snake_case = storage.field('''bytes''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: snake_case = storage.field('''path''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(A__ , self.pa_type ) def UpperCamelCase ( self , A__ ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A__ ): with xopen(A__ , '''rb''' ) as f: snake_case = f.read() return bytes_ snake_case = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else 
x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) snake_case = pa.array( [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(A__ , self.pa_type )
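A hedged sketch of how the feature above is typically used through datasets; cast_column and the decoded dict layout match the public API, while the dataset id is only illustrative:

from datasets import Audio, load_dataset

ds = load_dataset("PolyAI/minds14", "en-US", split="train")
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}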
44
0
'''simple docstring''' import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : List[Any] , a : Optional[Any] , a : Optional[int] ) ->Union[str, Any]: snake_case = UniSpeechSatForSequenceClassification.from_pretrained(a , config=a ) snake_case = downstream_dict['''projector.weight'''] snake_case = downstream_dict['''projector.bias'''] snake_case = downstream_dict['''model.post_net.linear.weight'''] snake_case = downstream_dict['''model.post_net.linear.bias'''] return model def __UpperCamelCase ( a : Tuple , a : int , a : Optional[int] ) ->str: snake_case = UniSpeechSatForAudioFrameClassification.from_pretrained(a , config=a ) snake_case = downstream_dict['''model.linear.weight'''] snake_case = downstream_dict['''model.linear.bias'''] return model def __UpperCamelCase ( a : Optional[int] , a : Optional[Any] , a : Optional[int] ) ->Optional[Any]: snake_case = UniSpeechSatForXVector.from_pretrained(a , config=a ) snake_case = downstream_dict['''connector.weight'''] snake_case = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): snake_case = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] snake_case = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] snake_case = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] snake_case = downstream_dict['''objective.W'''] return model @torch.no_grad() def __UpperCamelCase ( a : Any , a : str , a : Any , a : Tuple ) ->List[Any]: snake_case = torch.load(a , map_location='''cpu''' ) snake_case = checkpoint['''Downstream'''] snake_case = UniSpeechSatConfig.from_pretrained(a ) snake_case = WavaVecaFeatureExtractor.from_pretrained( a , return_attention_mask=a , do_normalize=a ) snake_case = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): snake_case = convert_classification(a , a , a ) elif arch.endswith('''ForAudioFrameClassification''' ): snake_case = convert_diarization(a , a , a ) elif arch.endswith('''ForXVector''' ): snake_case = convert_xvector(a , a , a ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: snake_case = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(a ) hf_model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') _lowercase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
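An illustrative invocation of the conversion script above; the flags come from its argparse block, while the script filename and every path are placeholders:

python convert_unispeech_sat_s3prl_checkpoint.py \
    --base_model_name microsoft/unispeech-sat-base \
    --config_path ./config.json \
    --checkpoint_path ./s3prl_checkpoint.pt \
    --model_dump_path ./converted_model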
715
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _lowercase : @staticmethod def UpperCamelCase ( *A__ , **A__ ) -> List[Any]: pass def __UpperCamelCase ( a : Image ) ->str: snake_case = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _lowercase ( unittest.TestCase ): _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCamelCase ( self , A__ , A__ ) -> List[Any]: snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ ) import datasets snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , A__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def UpperCamelCase ( self ) -> Optional[Any]: pass @slow @require_torch def UpperCamelCase ( self ) -> Dict: snake_case = '''Intel/dpt-large''' snake_case = pipeline('''depth-estimation''' , model=A__ ) snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) snake_case = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 ) @require_torch def UpperCamelCase ( self ) -> Any: # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
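A hedged sketch of the slow-test path above, runnable with network access; the model id and image URL appear verbatim in the test:

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["predicted_depth"].shape, outputs["depth"].size)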
44
0
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = CodeGenTokenizer _UpperCAmelCase = CodeGenTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = {'''add_prefix_space''': True} _UpperCAmelCase = False def UpperCamelCase ( self ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] snake_case = dict(zip(A__ , range(len(A__ ) ) ) ) snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case = {'''unk_token''': '''<unk>'''} snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A__ ) ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = '''lower newer''' snake_case = '''lower newer''' return input_text, output_text def UpperCamelCase ( self ) -> List[Any]: snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case = '''lower newer''' snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) self.assertListEqual(A__ , A__ ) snake_case = tokens + [tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return snake_case = self.get_tokenizer() snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = '''lower newer''' # Testing tokenization snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids without special tokens snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids with special tokens snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = tokenizer.encode(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # Testing the unknown token snake_case = tokens + [rust_tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] 
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCamelCase ( self , A__=15 ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) # Pair input self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) def UpperCamelCase ( self ) -> Tuple: snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input looooooooong''', '''This is a simple input'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] snake_case = tokenizer.pad_token_id snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in 
out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def UpperCamelCase ( self ) -> str: snake_case = '''$$$''' snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ ) snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = tokenizer.bos_token_id snake_case = tokenizer(A__ ) snake_case = tokenizer(A__ ) self.assertEqual(out_s.input_ids[0] , A__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) snake_case = tokenizer.decode(out_s.input_ids ) snake_case = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCamelCase ( self ) -> Any: snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' snake_case = '''\nif len_a > len_b: result = a\nelse: result = b''' snake_case = tokenizer.encode(A__ ) snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: pass
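A hedged sketch of the truncation behaviour the final test checks; truncate_before_pattern is a real CodeGenTokenizer.decode argument, but the snippet itself is ours:

from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tokenizer.encode("if a > b:\n    result = a\n\n\n# trailing noise")
print(tokenizer.decode(ids, truncate_before_pattern=["^#"]))  # stops before the comment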
716
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __UpperCamelCase ( a : Optional[int] ) ->Dict: snake_case = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Optional[Any] ) ->int: snake_case = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case = s_dict.pop(a ) elif "subsample" in key: snake_case = s_dict.pop(a ) def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]: snake_case , snake_case = emb.weight.shape snake_case = nn.Linear(a , a , bias=a ) snake_case = emb.weight.data return lin_layer def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple: snake_case = torch.load(a , map_location='''cpu''' ) snake_case = mam_aaa['''args'''] snake_case = mam_aaa['''model'''] snake_case = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(a ) rename_keys(a ) snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0] snake_case = args.share_decoder_input_output_embed snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )] snake_case = SpeechaTextConfig( vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , ) snake_case = SpeechaTextForConditionalGeneration(a ) snake_case , snake_case = model.model.load_state_dict(a , strict=a ) if len(a ) > 0 and not set(a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f""" but all the following weights are missing {missing}""" ) if tie_embeds: snake_case = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case = lm_head_weights model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
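The embedding-to-linear helper above (make_linear_from_emb upstream) ties the decoder's output projection to the token embedding matrix; a minimal standalone sketch of the same trick:

import torch
from torch import nn

emb = nn.Embedding(10, 4)
lin = nn.Linear(4, 10, bias=False)
lin.weight.data = emb.weight.data  # share weights: logits = hidden @ emb.weight.T
assert torch.equal(lin.weight, emb.weight)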
44
0
'''simple docstring''' import math import sys import cva import numpy as np def __UpperCamelCase ( a : np.ndarray , a : float ) ->np.ndarray: # For applying gaussian function for each element in matrix. snake_case = math.sqrt(a ) snake_case = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def __UpperCamelCase ( a : np.ndarray , a : int , a : int , a : int ) ->np.ndarray: snake_case = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def __UpperCamelCase ( a : int , a : float ) ->np.ndarray: # Creates a gaussian kernel of given dimension. snake_case = np.zeros((kernel_size, kernel_size) ) for i in range(0 , a ): for j in range(0 , a ): snake_case = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(a , a ) def __UpperCamelCase ( a : np.ndarray , a : float , a : float , a : int , ) ->np.ndarray: snake_case = np.zeros(img.shape ) snake_case = get_gauss_kernel(a , a ) snake_case , snake_case = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): snake_case = get_slice(a , a , a , a ) snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2] snake_case = vec_gaussian(a , a ) snake_case = np.multiply(a , a ) snake_case = np.multiply(a , a ) snake_case = np.sum(a ) / np.sum(a ) snake_case = val return imga def __UpperCamelCase ( a : list ) ->tuple: snake_case = args[1] if args[1:] else '''../image_data/lena.jpg''' snake_case = float(args[2] ) if args[2:] else 1.0 snake_case = float(args[3] ) if args[3:] else 1.0 if args[4:]: snake_case = int(args[4] ) snake_case = kernel_size + abs(kernel_size % 2 - 1 ) else: snake_case = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": _lowercase , _lowercase , _lowercase , _lowercase = parse_args(sys.argv) _lowercase = cva.imread(filename, 0) cva.imshow('input image', img) _lowercase = img / 255 _lowercase = out.astype('float32') _lowercase = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) _lowercase = out * 255 _lowercase = np.uinta(out) cva.imshow('output image', out) cva.waitKey(0) cva.destroyAllWindows()
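A self-contained sketch of the vectorised Gaussian used above: a zero difference image should map everywhere to the peak value 1/sqrt(2*pi) for unit variance.

import math
import numpy as np

def vec_gaussian_sketch(img: np.ndarray, variance: float) -> np.ndarray:
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)

out = vec_gaussian_sketch(np.zeros((3, 3)), 1.0)
assert np.allclose(out, 1 / math.sqrt(2 * math.pi))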
717
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowercase ( metaclass=__a ): _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq'''] def __init__( self , *A__ , **A__ ) -> Union[str, Any]: requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase ( cls , *A__ , **A__ ) -> Optional[Any]: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase ( cls , *A__ , **A__ ) -> Any: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
44
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowercase ( __a ): def __init__( self , A__ , A__ ) -> str: super().__init__() # make sure scheduler can always be converted to DDIM snake_case = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=A__ , scheduler=A__ ) @torch.no_grad() def __call__( self , A__ = 1 , A__ = None , A__ = 0.0 , A__ = 50 , A__ = None , A__ = "pil" , A__ = True , ) -> Union[ImagePipelineOutput, Tuple]: # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , A__ ): snake_case = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: snake_case = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(A__ , A__ ) and len(A__ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(A__ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) snake_case = randn_tensor(A__ , generator=A__ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(A__ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output snake_case = self.unet(A__ , A__ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 snake_case = self.scheduler.step( A__ , A__ , A__ , eta=A__ , use_clipped_model_output=A__ , generator=A__ ).prev_sample snake_case = (image / 2 + 0.5).clamp(0 , 1 ) snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": snake_case = self.numpy_to_pil(A__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A__ )
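A hedged usage sketch mirroring the diffusers quickstart; google/ddpm-cifar10-32 is the standard example checkpoint for this pipeline:

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
image.save("ddim_sample.png")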
718
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator class Node : def __init__( self , value ) -> None: self.value = value self.left = None self.right = None class BinaryTreeNodeSum : def __init__( self , tree ) -> None: self.tree = tree def depth_first_search( self , node ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
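An illustrative use of the classes above: iterating the wrapper yields the sum of all node values.

root = Node(10)
root.left = Node(5)
root.right = Node(-3)
assert next(iter(BinaryTreeNodeSum(root))) == 12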
style_context_codestyle: 44
label: 0
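The style_context above computes a depth-first sum of node values. With readable names in place of the obfuscated ones, the same logic is:

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class Node:
    value: int
    left: Node | None = None
    right: Node | None = None


def tree_sum(node: Node | None) -> int:
    # Recursive depth-first traversal: a node contributes its value plus both subtrees.
    if node is None:
        return 0
    return node.value + tree_sum(node.left) + tree_sum(node.right)


assert tree_sum(Node(10, Node(5), Node(-3))) == 12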
'''simple docstring''' import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def __UpperCamelCase ( a : Union[str, Any] ) ->Union[str, Any]: if string == "True": return True elif string == "False": return False else: raise ValueError(f"""could not parse string as bool {string}""" ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) _lowercase = parser.parse_args() _lowercase = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
code_codestyle: 719
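The conversion script above defines a string-to-bool helper because argparse's `type=bool` treats any non-empty string, including "False", as truthy. A standalone restatement with a usage check:

import argparse


def parse_bool(string: str) -> bool:
    # Accept only the exact literals "True" / "False"; reject anything else loudly.
    if string == "True":
        return True
    if string == "False":
        return False
    raise ValueError(f"could not parse string as bool {string}")


parser = argparse.ArgumentParser()
parser.add_argument("--use_linear_projection", type=parse_bool)
assert parser.parse_args(["--use_linear_projection", "False"]).use_linear_projection is False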
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _lowercase = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __UpperCamelCase ( a : List[str] ) ->Optional[int]: snake_case = torch.load(a , map_location='''cpu''' ) return sd def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] , a : int=rename_keys_prefix ) ->Tuple: snake_case = OrderedDict() snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue snake_case = key for name_pair in rename_keys_prefix: snake_case = new_key.replace(name_pair[0] , name_pair[1] ) snake_case = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately snake_case = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: snake_case = '''pretraining''' if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "nlvr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} snake_case = '''multichoice''' elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} snake_case = '''vqa_advanced''' elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129} snake_case = '''vqa''' elif "nlvr" in checkpoint_path: snake_case = { '''visual_embedding_dim''': 1024, '''num_labels''': 2, } snake_case = '''nlvr''' snake_case = VisualBertConfig(**a ) # Load State Dict snake_case = load_state_dict(a ) snake_case = get_new_dict(a , a ) if model_type == "pretraining": snake_case = VisualBertForPreTraining(a ) elif model_type == "vqa": snake_case = VisualBertForQuestionAnswering(a ) elif model_type == "nlvr": snake_case = VisualBertForVisualReasoning(a ) elif model_type == "multichoice": snake_case = VisualBertForMultipleChoice(a ) model.load_state_dict(a ) # Save Checkpoints Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') 
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
style_context_codestyle: 44
label: 0
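The VisualBERT converter above renames checkpoint keys by applying ordered (old, new) prefix pairs to every key of the state dict. A minimal sketch of that loop; the pair list here is a subset, for illustration only:

RENAME_PAIRS = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]  # subset of the full map above


def rename_state_dict_keys(state_dict: dict) -> dict:
    new_state_dict = {}
    for key, value in state_dict.items():
        for old, new in RENAME_PAIRS:
            key = key.replace(old, new)  # pairs are applied in order
        new_state_dict[key] = value
    return new_state_dict


assert rename_state_dict_keys({"bert.bert.encoder.w": 1}) == {"visual_bert.encoder.w": 1}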
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: _lowercase = None _lowercase = logging.get_logger(__name__) _lowercase = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} _lowercase = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } _lowercase = { 'facebook/nllb-large-en-ro': 1_024, 'facebook/nllb-200-distilled-600M': 1_024, } # fmt: off _lowercase = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class _lowercase ( __a ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = 
PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase = NllbTokenizer _UpperCAmelCase = [] _UpperCAmelCase = [] def __init__( self , A__=None , A__=None , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=None , A__=None , A__=None , A__=False , **A__ , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it snake_case = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token snake_case = legacy_behaviour super().__init__( vocab_file=A__ , tokenizer_file=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , src_lang=A__ , tgt_lang=A__ , additional_special_tokens=A__ , legacy_behaviour=A__ , **A__ , ) snake_case = vocab_file snake_case = False if not self.vocab_file else True snake_case = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) snake_case = { lang_code: self.convert_tokens_to_ids(A__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case = src_lang if src_lang is not None else '''eng_Latn''' snake_case = self.convert_tokens_to_ids(self._src_lang ) snake_case = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCamelCase ( self ) -> str: return self._src_lang @src_lang.setter def UpperCamelCase ( self , A__ ) -> None: snake_case = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]: snake_case = [self.sep_token_id] snake_case = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase ( self , A__ , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) snake_case = src_lang snake_case = self(A__ , add_special_tokens=A__ , return_tensors=A__ , **A__ ) snake_case = self.convert_tokens_to_ids(A__ ) snake_case = tgt_lang_id return inputs def UpperCamelCase ( self , A__ , A__ = "eng_Latn" , A__ = None , A__ = "fra_Latn" , **A__ , ) -> BatchEncoding: snake_case = src_lang snake_case = tgt_lang return super().prepare_seqaseq_batch(A__ , A__ , **A__ ) def UpperCamelCase ( self ) -> List[Any]: return self.set_src_lang_special_tokens(self.src_lang ) def UpperCamelCase ( self ) -> Optional[Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCamelCase ( self , A__ ) -> None: snake_case = self.convert_tokens_to_ids(A__ ) if self.legacy_behaviour: snake_case = [] snake_case = [self.eos_token_id, self.cur_lang_code] else: snake_case = [self.cur_lang_code] snake_case = [self.eos_token_id] snake_case = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case = processors.TemplateProcessing( 
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCamelCase ( self , A__ ) -> None: snake_case = self.convert_tokens_to_ids(A__ ) if self.legacy_behaviour: snake_case = [] snake_case = [self.eos_token_id, self.cur_lang_code] else: snake_case = [self.cur_lang_code] snake_case = [self.eos_token_id] snake_case = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(A__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return snake_case = os.path.join( A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ): copyfile(self.vocab_file , A__ ) return (out_vocab_file,)
code_codestyle: 720
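The NLLB tokenizer above switches between two special-token layouts: the legacy behaviour appended the language code after `</s>`, while the fixed behaviour prefixes it. A sketch with illustrative integer ids (not the real vocabulary):

EOS, ENG_LATN = 2, 256047  # assumed ids, for illustration only


def wrap_ids(token_ids: list[int], legacy: bool) -> list[int]:
    if legacy:
        return token_ids + [EOS, ENG_LATN]  # old: "tokens </s> eng_Latn"
    return [ENG_LATN] + token_ids + [EOS]   # fixed: "eng_Latn tokens </s>"


assert wrap_ids([9, 8], legacy=False) == [ENG_LATN, 9, 8, EOS]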
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]: snake_case = original_name.split('''.''' )[0] snake_case = key.split('''.''' ) snake_case = int(key_list[key_list.index(a ) - 2] ) snake_case = int(key_list[key_list.index(a ) - 1] ) snake_case = orig_block_num - offset snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def __UpperCamelCase ( a : Tuple ) ->Dict: snake_case = OrderedDict() snake_case , snake_case = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): snake_case = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 snake_case = key[: key.find('''proj''' )] snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" ) snake_case = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: snake_case = '''poolformer.encoder.''' + key if "mlp.fc1" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' ) if "norm2" in key: snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: snake_case = key.replace('''head''' , '''classifier''' ) snake_case = value return new_state_dict def __UpperCamelCase ( ) ->Optional[int]: snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case = Image.open(requests.get(a , stream=a ).raw ) return image @torch.no_grad() def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]: snake_case = PoolFormerConfig() # set attributes based on model_name snake_case = '''huggingface/label-files''' snake_case = model_name[-3:] snake_case = 1000 snake_case = '''imagenet-1k-id2label.json''' snake_case = (1, 1000) # set config attributes snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} if size == "s12": snake_case = [2, 2, 6, 2] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s24": snake_case = [4, 4, 12, 4] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s36": snake_case = [6, 6, 18, 6] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.9 elif size == "m36": snake_case = [6, 6, 18, 6] snake_case = [96, 192, 384, 768] snake_case = 4.0 
snake_case = 1e-6 snake_case = 0.95 elif size == "m48": snake_case = [8, 8, 24, 8] snake_case = [96, 192, 384, 768] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.95 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) # Prepare image snake_case = prepare_img() snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict snake_case = torch.load(a , map_location=torch.device('''cpu''' ) ) # rename keys snake_case = rename_keys(a ) # create HuggingFace model and load state dict snake_case = PoolFormerForImageClassification(a ) model.load_state_dict(a ) model.eval() # Define image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass snake_case = model(a ) snake_case = outputs.logits # define expected logit slices for different models if size == "s12": snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": snake_case = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": snake_case = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": snake_case = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a , atol=1e-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) _lowercase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
style_context_codestyle: 44
label: 0
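The PoolFormer converter above renumbers flat `network.<block>.<layer>` keys, subtracting a patch-embedding offset while renaming the sub-module. A worked, de-obfuscated sketch of that helper:

def replace_key_with_offset(key: str, offset: int, old: str, new: str) -> str:
    parts = key.split(".")
    idx = parts.index(old.split(".")[0])           # locate the module name, e.g. "mlp"
    orig_block, layer = int(parts[idx - 2]), int(parts[idx - 1])
    return key.replace(f"{orig_block}.{layer}.{old}", f"block.{orig_block - offset}.{layer}.{new}")


assert (
    replace_key_with_offset("poolformer.encoder.2.1.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "poolformer.encoder.block.1.1.output.conv1.weight"
)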
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel _lowercase = False _lowercase = True _lowercase = False if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') _lowercase = parser.parse_args() _lowercase = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } _lowercase = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } _lowercase = '' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: _lowercase = reader.read() _lowercase = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): _lowercase = UNetaDModel(**config) else: _lowercase = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel _lowercase = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) _lowercase = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: _lowercase = config[key] del config[key] _lowercase = [k.replace('UNetRes', '') for k in config['down_block_types']] _lowercase = [k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: _lowercase = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) _lowercase = {} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue _lowercase = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: _lowercase = param_value _lowercase = True if not has_changed: _lowercase = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
code_codestyle: 721
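The UNet migration script above maps old config field names to the new schema before reloading the model. The renaming step reduces to a dict-key mapping, sketched here with a subset of the pairs:

RENAMES = {"image_size": "sample_size", "num_res_blocks": "layers_per_block"}  # subset of the map above


def migrate_config(config: dict) -> dict:
    # Rename known keys, keep unknown keys and all values as-is.
    return {RENAMES.get(key, key): value for key, value in config.items()}


assert migrate_config({"image_size": 64, "act_fn": "silu"}) == {"sample_size": 64, "act_fn": "silu"}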
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowercase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowercase = logging.getLogger() def __UpperCamelCase ( ) ->Tuple: snake_case = argparse.ArgumentParser() parser.add_argument('''-f''' ) snake_case = parser.parse_args() return args.f def __UpperCamelCase ( a : Dict , a : Tuple="eval" ) ->List[Any]: snake_case = os.path.join(a , f"""{split}_results.json""" ) if os.path.exists(a ): with open(a , '''r''' ) as f: return json.load(a ) raise ValueError(f"""can't find {path}""" ) _lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _lowercase ( __a ): def UpperCamelCase ( self ) -> List[str]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_glue.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) @slow def UpperCamelCase ( self ) -> List[Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_clm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 1_00 ) @slow def UpperCamelCase ( self ) -> int: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A__ , '''argv''' , A__ ): run_summarization_flax.main() snake_case = get_results(A__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt 
--validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_mlm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def UpperCamelCase ( self ) -> Dict: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_ta_mlm_flax.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 ) @slow def UpperCamelCase ( self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case = 7 if get_gpu_count() > 1 else 2 snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_ner.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def UpperCamelCase ( self ) -> Any: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_qa.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
style_context_codestyle: 44
label: 0
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) _lowercase = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self , A__ , A__ , A__ = None , A__ = None ) -> List[str]: snake_case = None snake_case = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) snake_case = os.path.abspath('''examples''' ) for item in os.listdir(A__ ): if item not in EXCLUDE_EXAMPLES: snake_case = os.path.join(A__ , A__ ) if os.path.isfile(A__ ) and ".py" in item_path: with self.subTest( tested_script=A__ , feature_script=A__ , tested_section='''main()''' if parser_only else '''training_function()''' , ): snake_case = compare_against_test( os.path.join(A__ , A__ ) , A__ , A__ , A__ ) snake_case = '''\n'''.join(A__ ) if special_strings is not None: for string in special_strings: snake_case = diff.replace(A__ , '''''' ) self.assertEqual(A__ , '''''' ) def UpperCamelCase ( self ) -> Optional[int]: self.one_complete_example('''complete_nlp_example.py''' , A__ ) self.one_complete_example('''complete_nlp_example.py''' , A__ ) def UpperCamelCase ( self ) -> Optional[int]: snake_case = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) snake_case = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , A__ , A__ , A__ ) self.one_complete_example('''complete_cv_example.py''' , A__ , A__ , A__ ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class _lowercase ( __a ): _UpperCAmelCase = False @classmethod def UpperCamelCase ( cls ) -> Union[str, Any]: super().setUpClass() snake_case = tempfile.mkdtemp() snake_case = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) snake_case = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def UpperCamelCase ( cls ) -> str: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def UpperCamelCase ( self ) -> List[Any]: snake_case = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def UpperCamelCase ( self ) -> Any: snake_case = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split() snake_case = run_command(self._launch_args + testargs ) 
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def UpperCamelCase ( self ) -> List[str]: snake_case = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() snake_case = run_command(self._launch_args + testargs , return_stdout=A__ ) self.assertNotIn('''epoch 0:''' , A__ ) self.assertIn('''epoch 1:''' , A__ ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() snake_case = run_command(self._launch_args + testargs , return_stdout=A__ ) if torch.cuda.is_available(): snake_case = torch.cuda.device_count() else: snake_case = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , A__ ) self.assertIn('''epoch 1:''' , A__ ) else: self.assertIn('''epoch 0:''' , A__ ) self.assertIn('''epoch 1:''' , A__ ) @slow def UpperCamelCase ( self ) -> str: snake_case = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): snake_case = run_command(self._launch_args + testargs , return_stdout=A__ ) snake_case = re.findall('''({.+})''' , A__ ) snake_case = [r for r in results if '''accuracy''' in r][-1] snake_case = ast.literal_eval(A__ ) self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 ) def UpperCamelCase ( self ) -> List[Any]: snake_case = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def UpperCamelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdir: snake_case = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(A__ , '''tracking''' ) ) ) def UpperCamelCase ( self ) -> Dict: snake_case = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
code_codestyle: 700
'''simple docstring''' from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS _lowercase = logging.get_logger(__name__) _lowercase = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class _lowercase ( __a ): def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]: super().__init__(*A__ , **A__ ) if config is None: assert isinstance(self.model , A__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) snake_case = self.model.config else: snake_case = config snake_case = data_args snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss snake_case = label_smoothed_nll_loss def UpperCamelCase ( self , A__ ) -> Tuple: if self.optimizer is None: snake_case = ['''bias''', '''LayerNorm.weight'''] snake_case = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] snake_case = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: snake_case = Adafactor snake_case = {'''scale_parameter''': False, '''relative_step''': False} else: snake_case = AdamW snake_case = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } snake_case = self.args.learning_rate if self.sharded_ddp: snake_case = OSS( params=A__ , optim=A__ , **A__ , ) else: snake_case = optimizer_cls(A__ , **A__ ) if self.lr_scheduler is None: snake_case = self._get_lr_scheduler(A__ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": snake_case = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: snake_case = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ ) return scheduler def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token snake_case = model(**A__ , use_cache=A__ )[0] snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2] else: # compute label smoothed loss snake_case = model(**A__ , use_cache=A__ )[0] snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 ) snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def UpperCamelCase ( self , A__ , A__ ) -> Any: snake_case = inputs.pop('''labels''' ) snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) return loss def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: snake_case = self._prepare_inputs(A__ ) snake_case = { '''max_length''': self.data_args.val_max_target_length if 
self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: snake_case = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) snake_case = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) snake_case = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) snake_case = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def UpperCamelCase ( self , A__ , A__ ) -> List[str]: # If PAD token is not defined at least EOS token has to be defined snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F""" padded to `max_length`={max_length}""" ) snake_case = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) snake_case = tensor return padded_tensor
style_context_codestyle: 44
label: 0
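The trainer above dynamically imports `label_smoothed_nll_loss` from the examples' utils module. Below is a self-contained restatement of the standard recipe, not the exact upstream code; `ignore_index` entries are masked out of both loss terms, and targets are clamped so the gather index stays valid on padding:

import torch


def label_smoothed_nll_loss(lprobs: torch.Tensor, target: torch.Tensor, epsilon: float, ignore_index: int):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids.
    pad_mask = target.eq(ignore_index).unsqueeze(-1)
    safe_target = target.clamp(min=0).unsqueeze(-1)  # keep gather indices valid on padding
    nll_loss = -lprobs.gather(dim=-1, index=safe_target).masked_fill(pad_mask, 0.0).sum()
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True).masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss


lprobs = torch.log_softmax(torch.randn(4, 10), dim=-1)
loss, nll = label_smoothed_nll_loss(lprobs, torch.tensor([1, 2, -100, 3]), 0.1, ignore_index=-100)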
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _lowercase ( unittest.TestCase ): def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , A__=False , A__=True , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , ) -> Dict: snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size if size is not None else {'''height''': 18, '''width''': 20} snake_case = do_thumbnail snake_case = do_align_axis snake_case = do_pad snake_case = do_normalize snake_case = image_mean snake_case = image_std def UpperCamelCase ( self ) -> List[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = DonutImageProcessor if is_vision_available() else None def UpperCamelCase ( self ) -> Tuple: snake_case = DonutImageProcessingTester(self ) @property def UpperCamelCase ( self ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ) -> List[Any]: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A__ , '''do_resize''' ) ) self.assertTrue(hasattr(A__ , '''size''' ) ) self.assertTrue(hasattr(A__ , '''do_thumbnail''' ) ) self.assertTrue(hasattr(A__ , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(A__ , '''do_pad''' ) ) self.assertTrue(hasattr(A__ , '''do_normalize''' ) ) self.assertTrue(hasattr(A__ , '''image_mean''' ) ) self.assertTrue(hasattr(A__ , '''image_std''' ) ) def UpperCamelCase ( self ) -> int: snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} ) snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) # Previous config had dimensions in (width, height) order snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} ) def UpperCamelCase ( self ) -> List[Any]: pass @is_flaky() def UpperCamelCase ( self ) -> List[str]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ ) for image in image_inputs: self.assertIsInstance(A__ , Image.Image ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case = 
image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def UpperCamelCase ( self ) -> str: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , np.ndarray ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def UpperCamelCase ( self ) -> Optional[Any]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , torch.Tensor ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
code_codestyle: 701
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def __UpperCamelCase ( a : List[str] ) ->str: snake_case = [] for line in lines: snake_case = re.sub(R'''#.*''' , '''''' , a ) # remove comments if line: filtered_lines.append(a ) snake_case = '''\n'''.join(a ) # Make a hash from all this code snake_case = full_str.encode('''utf-8''' ) return shaaaa(a ).hexdigest() # get importable module names and hash for caching _lowercase = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions _lowercase = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _lowercase = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name _lowercase = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
style_context_codestyle: 44
label: 0
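The module registry above hashes each loader's source after stripping comments and empty lines, so purely cosmetic edits do not invalidate the cache. A standalone restatement, assuming the obfuscated `shaaaa` is `hashlib.sha256`:

import re
from hashlib import sha256


def hash_python_lines(lines: list[str]) -> str:
    # Drop comments, then drop lines left empty, then hash what remains.
    filtered = [re.sub(r"#.*", "", line) for line in lines]
    full_str = "\n".join(line for line in filtered if line)
    return sha256(full_str.encode("utf-8")).hexdigest()


assert hash_python_lines(["x = 1  # comment"]) == hash_python_lines(["x = 1  "])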
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def __UpperCamelCase ( a : int ) ->int: snake_case = tmp_path / '''file.csv''' snake_case = textwrap.dedent( '''\ header1,header2 1,2 10,20 ''' ) with open(a , '''w''' ) as f: f.write(a ) return str(a ) @pytest.fixture def __UpperCamelCase ( a : Tuple ) ->Tuple: snake_case = tmp_path / '''malformed_file.csv''' snake_case = textwrap.dedent( '''\ header1,header2 1,2 10,20, ''' ) with open(a , '''w''' ) as f: f.write(a ) return str(a ) @pytest.fixture def __UpperCamelCase ( a : List[str] , a : List[str] ) ->Any: snake_case = tmp_path / '''csv_with_image.csv''' snake_case = textwrap.dedent( f"""\ image {image_file} """ ) with open(a , '''w''' ) as f: f.write(a ) return str(a ) @pytest.fixture def __UpperCamelCase ( a : int ) ->List[str]: snake_case = tmp_path / '''csv_with_label.csv''' snake_case = textwrap.dedent( '''\ label good bad good ''' ) with open(a , '''w''' ) as f: f.write(a ) return str(a ) @pytest.fixture def __UpperCamelCase ( a : int ) ->Any: snake_case = tmp_path / '''csv_with_int_list.csv''' snake_case = textwrap.dedent( '''\ int_list 1 2 3 4 5 6 7 8 9 ''' ) with open(a , '''w''' ) as f: f.write(a ) return str(a ) def __UpperCamelCase ( a : str , a : Dict , a : Tuple ) ->List[str]: snake_case = Csv() snake_case = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(a , match='''Error tokenizing data''' ): for _ in generator: pass assert any( record.levelname == '''ERROR''' and '''Failed to read file''' in record.message and os.path.basename(a ) in record.message for record in caplog.records ) @require_pil def __UpperCamelCase ( a : Union[str, Any] ) ->Any: with open(a , encoding='''utf-8''' ) as f: snake_case = f.read().splitlines()[1] snake_case = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) ) snake_case = csv._generate_tables([[csv_file_with_image]] ) snake_case = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('''image''' ).type == Image()() snake_case = pa_table.to_pydict()['''image'''] assert generated_content == [{"path": image_file, "bytes": None}] def __UpperCamelCase ( a : str ) ->Union[str, Any]: with open(a , encoding='''utf-8''' ) as f: snake_case = f.read().splitlines()[1:] snake_case = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) ) snake_case = csv._generate_tables([[csv_file_with_label]] ) snake_case = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )() snake_case = pa_table.to_pydict()['''label'''] assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(a ) for label in labels] def __UpperCamelCase ( a : Dict ) ->Optional[Any]: snake_case = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda a : [int(a ) for i in x.split()]} ) snake_case = csv._generate_tables([[csv_file_with_int_list]] ) snake_case = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type ) snake_case = pa_table.to_pydict()['''int_list'''] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
code_codestyle: 702
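The last test above relies on pandas-style `converters`: per-column callables applied to each raw cell string before type inference. A standalone illustration of the same `int_list` parsing:

import io

import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n"
df = pd.read_csv(io.StringIO(csv_text), converters={"int_list": lambda x: [int(i) for i in x.split()]})
assert df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]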
'''simple docstring''' _lowercase = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
style_context_codestyle: 44
label: 0
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowercase , _lowercase , _lowercase = False, False, False @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = '''dict''' _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a ) def __call__( self ) -> Optional[Any]: return self.pa_type def UpperCamelCase ( self , A__ ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(A__ , A__ ): return {"bytes": None, "path": value} elif isinstance(A__ , A__ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes snake_case = BytesIO() sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 snake_case = BytesIO(bytes() ) sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def UpperCamelCase ( self , A__ , A__ = None ) -> dict: if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: snake_case = token_per_repo_id or {} snake_case = path.split('''::''' )[-1] try: snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id'''] snake_case = token_per_repo_id[repo_id] except (ValueError, KeyError): snake_case = None with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f: snake_case , snake_case = sf.read(A__ ) else: snake_case , snake_case = sf.read(A__ ) snake_case = array.T if self.mono: snake_case = librosa.to_mono(A__ ) if self.sampling_rate and self.sampling_rate != sampling_rate: snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate ) snake_case = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def UpperCamelCase ( self , A__ ) -> pa.StructArray: if pa.types.is_string(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: snake_case = storage.field('''bytes''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: snake_case = storage.field('''path''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(A__ , self.pa_type ) def UpperCamelCase ( self , A__ ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A__ ): with xopen(A__ , '''rb''' ) as f: snake_case = f.read() return bytes_ snake_case = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else 
x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) snake_case = pa.array( [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(A__ , self.pa_type )
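# A minimal, self-contained sketch of the encode/decode round trip the Audio
# feature above implements: store an in-memory waveform, cast the column, and
# read it back resampled. Assumes `datasets`, `soundfile` and `librosa` are
# installed; the 440 Hz sine wave is synthetic test data.
import numpy as np
from datasets import Audio, Dataset

sr = 16_000
wave = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)

ds = Dataset.from_dict({"audio": [{"array": wave, "sampling_rate": sr}]})
ds = ds.cast_column("audio", Audio(sampling_rate=8_000))  # resampled on access

example = ds[0]["audio"]
print(example["sampling_rate"], example["array"].shape)  # 8000 (8000,)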
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = IFInpaintingSuperResolutionPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase ( self ) -> int: return self._get_superresolution_dummy_components() def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]: if str(A__ ).startswith('''mps''' ): snake_case = torch.manual_seed(A__ ) else: snake_case = torch.Generator(device=A__ ).manual_seed(A__ ) snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCamelCase ( self ) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCamelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_local() def UpperCamelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
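# `get_dummy_inputs` above seeds differently per device; this is the same
# pattern in isolation. On "mps", older torch builds reject device-local
# generators, so the tests fall back to seeding the global CPU generator.
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
print(torch.rand(2, generator=gen))  # deterministic for a fixed seed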
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _lowercase : @staticmethod def UpperCamelCase ( *A__ , **A__ ) -> List[Any]: pass def __UpperCamelCase ( a : Image ) ->str: snake_case = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _lowercase ( unittest.TestCase ): _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCamelCase ( self , A__ , A__ ) -> List[Any]: snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ ) import datasets snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , A__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def UpperCamelCase ( self ) -> Optional[Any]: pass @slow @require_torch def UpperCamelCase ( self ) -> Dict: snake_case = '''Intel/dpt-large''' snake_case = pipeline('''depth-estimation''' , model=A__ ) snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) snake_case = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.3_04 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 ) @require_torch def UpperCamelCase ( self ) -> Any: # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
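# A hedged usage sketch of the depth-estimation pipeline exercised by the slow
# test above. It downloads the Intel/dpt-large checkpoint and a COCO image, so
# it needs network access; the output keys match the test's assertions.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

print(out["predicted_depth"].shape)  # torch.Tensor of per-pixel depth
out["depth"].save("depth.png")       # PIL visualisation of the depth map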
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _lowercase = logging.get_logger(__name__) class _lowercase ( __a ): def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: snake_case = feature_size snake_case = sampling_rate snake_case = padding_value snake_case = kwargs.pop('''padding_side''' , '''right''' ) snake_case = kwargs.pop('''return_attention_mask''' , A__ ) super().__init__(**A__ ) def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): snake_case = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) snake_case = processed_features[self.model_input_names[0]] snake_case = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(A__ ) == 0: if return_attention_mask: snake_case = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch snake_case = required_input[0] if isinstance(A__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. snake_case = 0 while len(required_input[index] ) == 0: index += 1 if index < len(A__ ): snake_case = required_input[index][0] if return_tensors is None: if is_tf_tensor(A__ ): snake_case = '''tf''' elif is_torch_tensor(A__ ): snake_case = '''pt''' elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ): snake_case = '''np''' else: raise ValueError( F"""type of {first_element} unknown: {type(A__ )}. 
""" '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): snake_case = to_numpy(A__ ) else: snake_case = [to_numpy(A__ ) for v in value] # Convert padding_strategy in PaddingStrategy snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ ) snake_case = processed_features[self.model_input_names[0]] snake_case = len(A__ ) if not all(len(A__ ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) snake_case = [] for i in range(A__ ): snake_case = {k: v[i] for k, v in processed_features.items()} # truncation snake_case = self._truncate( A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , ) truncated_inputs.append(A__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) snake_case = PaddingStrategy.MAX_LENGTH snake_case = {} for i in range(A__ ): # padding snake_case = self._pad( truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , ) for key, value in outputs.items(): if key not in batch_outputs: snake_case = [] if value.dtype is np.dtype(np.floataa ): snake_case = value.astype(np.floataa ) batch_outputs[key].append(A__ ) return BatchFeature(A__ , tensor_type=A__ ) def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict: snake_case = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: snake_case = len(A__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: snake_case = np.ones(len(A__ ) , dtype=np.intaa ) if needs_to_be_padded: snake_case = max_length - len(A__ ) if self.padding_side == "right": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (0, difference) ) snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) snake_case = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * 
pad_to_multiple_of snake_case = len(A__ ) > max_length if needs_to_be_truncated: snake_case = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: snake_case = processed_features['''attention_mask'''][:max_length] return processed_features def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]: # Get padding strategy if padding is not False: if padding is True: snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(A__ , A__ ): snake_case = PaddingStrategy(A__ ) elif isinstance(A__ , A__ ): snake_case = padding else: snake_case = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
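# A numpy-only sketch of what `_pad` does for `padding_side="right"` with
# `feature_size == 1` under PaddingStrategy.LONGEST: right-pad every sequence
# to the batch maximum and build the matching attention mask.
import numpy as np

def pad_longest(batch, padding_value=0.0):
    max_len = max(len(x) for x in batch)
    padded, masks = [], []
    for x in batch:
        diff = max_len - len(x)
        padded.append(np.pad(x, (0, diff), constant_values=padding_value))
        masks.append(np.pad(np.ones(len(x), dtype=np.int32), (0, diff)))
    return np.stack(padded), np.stack(masks)

values, mask = pad_longest([np.array([1.0, 2.0, 3.0]), np.array([4.0])])
print(values)  # [[1. 2. 3.] [4. 0. 0.]]
print(mask)    # [[1 1 1] [1 0 0]]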
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class _lowercase ( unittest.TestCase ): @slow def UpperCamelCase ( self ) -> int: snake_case = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) snake_case = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house snake_case = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim snake_case = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case = model(A__ )['''last_hidden_state'''].detach() self.assertEqual(output.shape , A__ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , A__ , atol=1e-3 ) ) @slow def UpperCamelCase ( self ) -> Optional[int]: snake_case = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' ) snake_case = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] ) # The dog is cute and lives in the garden house snake_case = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim snake_case = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case = model(A__ )['''last_hidden_state'''].detach() self.assertEqual(output.shape , A__ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] , A__ , atol=1e-3 ) )
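# A hedged sketch of the feature-extraction path these integration tests
# assert on: encode the same sentence and inspect the hidden-state shape.
# Needs network access to fetch `xlm-roberta-base`.
import torch
from transformers import AutoTokenizer, XLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("xlm-roberta-base").eval()

inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # torch.Size([1, seq_len, 768]) for the base checkpoint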
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowercase ( yaml.SafeLoader ): def UpperCamelCase ( self , A__ ) -> List[str]: snake_case = [self.constructed_objects[key_node] for key_node, _ in node.value] snake_case = [tuple(A__ ) if isinstance(A__ , A__ ) else key for key in keys] snake_case = Counter(A__ ) snake_case = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def UpperCamelCase ( self , A__ , A__=False ) -> List[Any]: snake_case = super().construct_mapping(A__ , deep=A__ ) self._check_no_duplicates_on_constructed_node(A__ ) return mapping def __UpperCamelCase ( a : str ) ->Tuple[Optional[str], str]: snake_case = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: snake_case = full_content[1:].index('''---''' ) + 1 snake_case = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(a ) class _lowercase ( __a ): # class attributes _UpperCAmelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case , snake_case = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(A__ ) else: return cls() def UpperCamelCase ( self , A__ ) -> str: if path.exists(): with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case = readme_file.read() else: snake_case = None snake_case = self._to_readme(A__ ) with open(A__ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(A__ ) def UpperCamelCase ( self , A__ = None ) -> str: if readme_content is not None: snake_case , snake_case = _split_yaml_from_readme(A__ ) snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": snake_case = yaml.load(A__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields snake_case = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**A__ ) def UpperCamelCase ( self ) -> str: return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=A__ , allow_unicode=A__ , encoding='''utf-8''' , ).decode('''utf-8''' ) _lowercase = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 
'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser _lowercase = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') _lowercase = ap.parse_args() _lowercase = Path(args.readme_filepath) _lowercase = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
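# A self-contained sketch of the front-matter split `_split_yaml_from_readme`
# performs above: the YAML between the two `---` fences is separated from the
# markdown body. The sample README content is made up for illustration.
readme = """---
language: en
license: mit
---
# My dataset

Some description.
"""

lines = readme.splitlines()
assert lines[0] == "---" and "---" in lines[1:]
sep = lines[1:].index("---") + 1
yaml_block = "\n".join(lines[1:sep])
body = "\n".join(lines[sep + 1 :])
print(yaml_block)            # language: en / license: mit
print(body.splitlines()[0])  # '# My dataset'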
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = StableDiffusionSAGPipeline _UpperCAmelCase = TEXT_TO_IMAGE_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase = False def UpperCamelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) snake_case = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) snake_case = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=A__ , set_alpha_to_one=A__ , ) torch.manual_seed(0 ) snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) snake_case = CLIPTextModel(A__ ) snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase ( self , A__ , A__=0 ) -> Any: if str(A__ ).startswith('''mps''' ): snake_case = torch.manual_seed(A__ ) else: snake_case = torch.Generator(device=A__ ).manual_seed(A__ ) snake_case = { '''prompt''': '''.''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 1.0, '''sag_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def UpperCamelCase ( self ) -> List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) snake_case = sag_pipe.to(A__ ) sag_pipe.set_progress_bar_config(disable=A__ ) snake_case = '''.''' snake_case = torch.manual_seed(0 ) snake_case = sag_pipe( [prompt] , generator=A__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) snake_case = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 
0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def UpperCamelCase ( self ) -> Dict: snake_case = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) snake_case = sag_pipe.to(A__ ) sag_pipe.set_progress_bar_config(disable=A__ ) snake_case = '''.''' snake_case = torch.manual_seed(0 ) snake_case = sag_pipe( [prompt] , generator=A__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) snake_case = output.images snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) snake_case = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def UpperCamelCase ( self ) -> int: snake_case = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) snake_case = sag_pipe.to(A__ ) sag_pipe.set_progress_bar_config(disable=A__ ) snake_case = '''.''' snake_case = torch.manual_seed(0 ) snake_case = sag_pipe( [prompt] , width=7_68 , height=5_12 , generator=A__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , ) snake_case = output.images assert image.shape == (1, 5_12, 7_68, 3)
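# A hedged end-to-end sketch matching the slow tests above: load the SAG
# pipeline and combine classifier-free guidance (`guidance_scale`) with
# self-attention guidance (`sag_scale`). Requires a CUDA GPU plus network
# access; the prompt is illustrative.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")

image = pipe(
    "a photo of an astronaut riding a horse",
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    sag_scale=1.0,
    num_inference_steps=20,
).images[0]
image.save("sag.png")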
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = CodeGenTokenizer _UpperCAmelCase = CodeGenTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = {'''add_prefix_space''': True} _UpperCAmelCase = False def UpperCamelCase ( self ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] snake_case = dict(zip(A__ , range(len(A__ ) ) ) ) snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case = {'''unk_token''': '''<unk>'''} snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A__ ) ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = '''lower newer''' snake_case = '''lower newer''' return input_text, output_text def UpperCamelCase ( self ) -> List[Any]: snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case = '''lower newer''' snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) self.assertListEqual(A__ , A__ ) snake_case = tokens + [tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return snake_case = self.get_tokenizer() snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = '''lower newer''' # Testing tokenization snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids without special tokens snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids with special tokens snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = tokenizer.encode(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # Testing the unknown token snake_case = tokens + [rust_tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] 
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCamelCase ( self , A__=15 ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) # Pair input self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) def UpperCamelCase ( self ) -> Tuple: snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input looooooooong''', '''This is a simple input'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] snake_case = tokenizer.pad_token_id snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in 
out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def UpperCamelCase ( self ) -> str: snake_case = '''$$$''' snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ ) snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = tokenizer.bos_token_id snake_case = tokenizer(A__ ) snake_case = tokenizer(A__ ) self.assertEqual(out_s.input_ids[0] , A__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) snake_case = tokenizer.decode(out_s.input_ids ) snake_case = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCamelCase ( self ) -> Any: snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' snake_case = '''\nif len_a > len_b: result = a\nelse: result = b''' snake_case = tokenizer.encode(A__ ) snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: pass
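# A hedged sketch of the `truncate_before_pattern` behaviour the slow test
# above verifies: decoding stops at the first regex match, which is how
# CodeGen completions are trimmed before trailing comments or string fences.
# Needs network access for the checkpoint.
import re
from transformers import CodeGenTokenizer

tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
patterns = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
print(tok.decode(tok.encode(text), truncate_before_pattern=patterns))
# prints the if/else block with the trailing '#' comment stripped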
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=True , A__=False , A__=False , A__=False , A__=2 , A__=99 , A__=0 , A__=32 , A__=5 , A__=4 , A__=0.1 , A__=0.1 , A__=5_12 , A__=2 , A__=0.0_2 , A__=2 , A__=4 , A__="last" , A__=True , A__=None , A__=0 , ) -> Optional[Any]: snake_case = parent snake_case = batch_size snake_case = seq_length snake_case = is_training snake_case = use_input_lengths snake_case = use_token_type_ids snake_case = use_labels snake_case = gelu_activation snake_case = sinusoidal_embeddings snake_case = causal snake_case = asm snake_case = n_langs snake_case = vocab_size snake_case = n_special snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_sequence_label_size snake_case = initializer_range snake_case = num_labels snake_case = num_choices snake_case = summary_type snake_case = use_proj snake_case = scope snake_case = bos_token_id def UpperCamelCase ( self ) -> Dict: snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case = random_attention_mask([self.batch_size, self.seq_length] ) snake_case = None if self.use_input_lengths: snake_case = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length snake_case = None if self.use_token_type_ids: snake_case = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) snake_case = None snake_case = None snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case = ids_tensor([self.batch_size] , 2 ).float() snake_case = ids_tensor([self.batch_size] , self.num_choices ) snake_case = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCamelCase ( self ) -> Any: return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , 
A__ , A__ , A__ , ) -> str: snake_case = XLMModel(config=A__ ) model.to(A__ ) model.eval() snake_case = model(A__ , lengths=A__ , langs=A__ ) snake_case = model(A__ , langs=A__ ) snake_case = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Union[str, Any]: snake_case = XLMWithLMHeadModel(A__ ) model.to(A__ ) model.eval() snake_case = model(A__ , token_type_ids=A__ , labels=A__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> List[str]: snake_case = XLMForQuestionAnsweringSimple(A__ ) model.to(A__ ) model.eval() snake_case = model(A__ ) snake_case = model(A__ , start_positions=A__ , end_positions=A__ ) snake_case = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Union[str, Any]: snake_case = XLMForQuestionAnswering(A__ ) model.to(A__ ) model.eval() snake_case = model(A__ ) snake_case = model( A__ , start_positions=A__ , end_positions=A__ , cls_index=A__ , is_impossible=A__ , p_mask=A__ , ) snake_case = model( A__ , start_positions=A__ , end_positions=A__ , cls_index=A__ , is_impossible=A__ , ) ((snake_case ) , ) = result_with_labels.to_tuple() snake_case = model(A__ , start_positions=A__ , end_positions=A__ ) ((snake_case ) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> int: snake_case = XLMForSequenceClassification(A__ ) model.to(A__ ) model.eval() snake_case = model(A__ ) snake_case = model(A__ , labels=A__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Dict: snake_case = self.num_labels snake_case = XLMForTokenClassification(A__ ) model.to(A__ ) model.eval() snake_case = model(A__ , attention_mask=A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[Any]: snake_case = self.num_choices snake_case = XLMForMultipleChoice(config=A__ ) model.to(A__ ) model.eval() snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() 
snake_case = model( A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.prepare_config_and_inputs() ( ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ( snake_case ) , ) = config_and_inputs snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class _lowercase ( __a , __a , __a , unittest.TestCase ): _UpperCAmelCase = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) _UpperCAmelCase = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable _UpperCAmelCase = ( { '''feature-extraction''': XLMModel, '''fill-mask''': XLMWithLMHeadModel, '''question-answering''': XLMForQuestionAnsweringSimple, '''text-classification''': XLMForSequenceClassification, '''text-generation''': XLMWithLMHeadModel, '''token-classification''': XLMForTokenClassification, '''zero-shot''': XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> Any: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCamelCase ( self , A__ , A__ , A__=False ) -> Optional[int]: snake_case = super()._prepare_for_class(A__ , A__ , return_labels=A__ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A__ ) snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A__ ) return inputs_dict def UpperCamelCase ( self ) -> Optional[int]: snake_case = XLMModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , emb_dim=37 ) def UpperCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> Tuple: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*A__ ) def UpperCamelCase ( self ) -> str: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*A__ ) def UpperCamelCase ( self ) -> Tuple: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*A__ ) def UpperCamelCase ( self ) -> Dict: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*A__ ) def UpperCamelCase ( self ) -> List[str]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*A__ ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__=False , A__=1 ) -> Any: self.assertIsInstance(A__ , A__ ) self.assertListEqual( [isinstance(A__ , A__ ) for iter_attentions in attentions] , [True] * len(A__ ) ) self.assertEqual(len(A__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(A__ ): # adds PAD dummy token snake_case = min_length + idx + 1 snake_case = min_length + idx + 1 snake_case = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A__ ) ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__=False , A__=1 ) -> Tuple: self.assertIsInstance(A__ , A__ ) self.assertListEqual( [isinstance(A__ , A__ ) for iter_hidden_states in hidden_states] , [True] * len(A__ ) , ) self.assertEqual(len(A__ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(A__ ): # adds PAD dummy token snake_case = min_length + idx + 1 snake_case = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A__ ) , ) pass @slow def UpperCamelCase ( self ) -> Tuple: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case = XLMModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) @require_torch class _lowercase ( unittest.TestCase ): @slow def UpperCamelCase ( self ) -> List[str]: snake_case = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(A__ ) snake_case = torch.tensor([[14, 4_47]] , dtype=torch.long , device=A__ ) # the president snake_case = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference snake_case = model.generate(A__ , do_sample=A__ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A__ )
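# A minimal sketch of the tiny-config pattern `XLMModelTester` relies on:
# build a small randomly initialised XLMModel (no downloads) and check the
# output shape. Token ids start at 3 to avoid the default pad index.
import torch
from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=5, n_heads=4,
                   max_position_embeddings=512)
model = XLMModel(config).eval()

input_ids = torch.randint(3, config.vocab_size, (2, 7))
with torch.no_grad():
    out = model(input_ids).last_hidden_state
print(out.shape)  # torch.Size([2, 7, 32])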
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]: snake_case = parent snake_case = batch_size snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = is_training snake_case = use_labels snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = type_sequence_label_size snake_case = initializer_range snake_case = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case = (image_size // patch_size) ** 2 snake_case = num_patches + 1 def UpperCamelCase ( self ) -> int: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> int: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = TFViTModel(config=A__ ) snake_case = model(A__ , training=A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) snake_case = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]: snake_case = self.type_sequence_label_size snake_case = TFViTForImageClassification(A__ ) snake_case = model(A__ , labels=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case = 1 snake_case = TFViTForImageClassification(A__ ) snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.prepare_config_and_inputs() snake_case , snake_case , snake_case = config_and_inputs snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () _UpperCAmelCase = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase ( self ) -> List[Any]: snake_case = TFViTModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> int: pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> str: pass def UpperCamelCase ( self ) -> Union[str, Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) ) def UpperCamelCase ( self ) -> List[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def UpperCamelCase ( self ) -> Any: snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(A__ ) def __UpperCamelCase ( ) ->Any: snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ) -> Optional[int]: return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Dict: snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) snake_case = self.default_image_processor snake_case = prepare_img() snake_case = image_processor(images=A__ , 
return_tensors='''tf''' ) # forward pass snake_case = model(**A__ ) # verify the logits snake_case = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
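# A hedged sketch of the integration test above: classify one image with
# TFViTForImageClassification. Needs TensorFlow, network access, and the COCO
# fixture image used throughout these tests (any RGB image works).
from PIL import Image
from transformers import TFViTForImageClassification, ViTImageProcessor

model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
print(logits.shape)  # (1, 1000)
print(model.config.id2label[int(logits.numpy().argmax(-1)[0])])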
44
0
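A minimal runnable sketch of the inference flow the flattened TFViT test above exercises, assuming network access to the Hugging Face Hub; the model id and COCO image URL are the ones used in the test itself:

import tensorflow as tf
import requests
from PIL import Image
from transformers import TFViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits                      # shape (1, 1000), as asserted in the test
predicted = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted])              # e.g. "Egyptian cat" for this COCO image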
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch _lowercase = random.Random() def __UpperCamelCase ( a : List[Any] , a : int=1.0 , a : Dict=None , a : str=None ) ->Optional[int]: if rng is None: snake_case = global_rng snake_case = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _lowercase ( unittest.TestCase ): def __init__( self , A__ , A__=7 , A__=4_00 , A__=20_00 , A__=10 , A__=1_60 , A__=8 , A__=0.0 , A__=40_00 , A__=False , A__=True , ) -> Union[str, Any]: snake_case = parent snake_case = batch_size snake_case = min_seq_length snake_case = max_seq_length snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case = padding_value snake_case = sampling_rate snake_case = return_attention_mask snake_case = do_normalize snake_case = feature_size snake_case = chunk_length snake_case = hop_length def UpperCamelCase ( self ) -> Tuple: return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase ( self , A__=False , A__=False ) -> List[Any]: def _flatten(A__ ): return list(itertools.chain(*A__ ) ) if equal_length: snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size snake_case = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case = [np.asarray(A__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = WhisperFeatureExtractor if is_speech_available() else None def UpperCamelCase ( self ) -> Optional[Any]: snake_case = WhisperFeatureExtractionTester(self ) def UpperCamelCase ( self ) -> Any: snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case = feat_extract_first.save_pretrained(A__ )[0] check_json_file_has_correct_format(A__ ) snake_case = self.feature_extraction_class.from_pretrained(A__ ) snake_case = feat_extract_first.to_dict() snake_case = feat_extract_second.to_dict() snake_case = feat_extract_first.mel_filters snake_case = feat_extract_second.mel_filters self.assertTrue(np.allclose(A__ , A__ ) ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> str: snake_case = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case = os.path.join(A__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(A__ ) snake_case = self.feature_extraction_class.from_json_file(A__ ) snake_case = feat_extract_first.to_dict() snake_case = feat_extract_second.to_dict() snake_case = 
feat_extract_first.mel_filters snake_case = feat_extract_second.mel_filters self.assertTrue(np.allclose(A__ , A__ ) ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> List[str]: # Tests that all call wrap to encode_plus and batch_encode_plus snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case = [np.asarray(A__ ) for speech_input in speech_inputs] # Test feature size snake_case = feature_extractor(A__ , padding='''max_length''' , return_tensors='''np''' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input snake_case = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) # Test batched snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A__ , A__ ): self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. snake_case = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] snake_case = np.asarray(A__ ) snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A__ , A__ ): self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) # Test truncation required snake_case = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )] snake_case = [np.asarray(A__ ) for speech_input in speech_inputs] snake_case = [x[: feature_extractor.n_samples] for x in speech_inputs] snake_case = [np.asarray(A__ ) for speech_input in speech_inputs_truncated] snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features for enc_seq_a, enc_seq_a in zip(A__ , A__ ): self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) ) def UpperCamelCase ( self ) -> List[Any]: import torch snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case = np.random.rand(1_00 , 32 ).astype(np.floataa ) snake_case = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) snake_case = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: snake_case = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech snake_case = ds.sort('''id''' ).select(range(A__ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase ( self ) -> List[str]: # fmt: off snake_case = torch.tensor( [ 0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1, 0.0_9_7_1, 
-0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8, 0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4, -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4 ] ) # fmt: on snake_case = self._load_datasamples(1 ) snake_case = WhisperFeatureExtractor() snake_case = feature_extractor(A__ , return_tensors='''pt''' ).input_features self.assertEqual(input_features.shape , (1, 80, 30_00) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , A__ , atol=1e-4 ) ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case = self._load_datasamples(1 )[0] snake_case = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue snake_case = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A__ )[0] self.assertTrue(np.all(np.mean(A__ ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(A__ ) - 1 ) < 1e-3 ) )
708
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _lowercase = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def __UpperCamelCase ( a : Dict=True ) ->str: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) ) class _lowercase ( __a ): _UpperCAmelCase = None _UpperCAmelCase = None def UpperCamelCase ( self , A__ , A__ ) -> str: with TemporaryDirectory() as tmp_dir: snake_case = dataset_module_factory(A__ , cache_dir=A__ ) snake_case = import_main_class(dataset_module.module_path , dataset=A__ ) snake_case = builder_cls( cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , ) snake_case = '''/'''.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ), config.DATASET_INFO_FILENAME, ] ) snake_case = cached_path(A__ , cache_dir=A__ ) self.assertTrue(os.path.exists(A__ ) ) @pytest.mark.integration def __UpperCamelCase ( a : List[str] ) ->Any: snake_case = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple''' snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam snake_case = None builder_instance.download_and_prepare() snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def __UpperCamelCase ( a : Any ) ->Union[str, Any]: snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path , dataset=a ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(a , a ) assert "train" in ds assert isinstance(ds['''train'''] , a ) assert next(iter(ds['''train'''] ) )
44
0
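The feature-extraction tests above assert that Whisper's extractor always pads or truncates audio to a fixed (80, 3000) log-mel grid. A small sketch of that behavior with the default extractor, where one second of silence stands in for real speech:

import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()   # defaults: 80 mel bins, 30 s chunks, 16 kHz
audio = np.zeros(16000, dtype=np.float32)       # 1 second of silence at 16 kHz
features = feature_extractor(audio, sampling_rate=16000, return_tensors="np").input_features
print(features.shape)                           # (1, 80, 3000): 80 mel bins x 30 s of 10 ms frames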
'''simple docstring'''
import math
from datetime import datetime, timedelta


def __UpperCamelCase ( a : int ) ->datetime:
    snake_case = year % 19
    snake_case = year % 4
    snake_case = year % 7
    snake_case = math.floor(year / 100 )
    snake_case = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    snake_case = leap_day_inhibits / 4
    snake_case = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    snake_case = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    snake_case = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    snake_case = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(a , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(a , 4 , 18 )
    else:
        return datetime(a , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


if __name__ == "__main__":
    for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        _lowercase = 'will be' if year > datetime.now().year else 'was'
        print(f'Easter in {year} {tense} {gauss_easter(year)}')
709
'''simple docstring'''
def __UpperCamelCase ( a : int , a : int ) ->int:
    while b:
        snake_case , snake_case = b, a % b
    return a


def __UpperCamelCase ( a : int , a : int ) ->int:
    return a if b == 0 else euclidean_gcd_recursive(a , a % b )


def __UpperCamelCase ( ) ->Optional[Any]:
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )


if __name__ == "__main__":
    main()
44
0
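The GCD file above is not executable as printed: the obfuscation renames both parameters to `a` and every assignment target to `snake_case`. A de-obfuscated sketch of the two functions, assuming the standard Euclidean algorithm was intended:

def euclidean_gcd(a: int, b: int) -> int:
    # iterative form: repeatedly replace (a, b) with (b, a mod b)
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    # the same recurrence, expressed recursively
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


assert euclidean_gcd(3, 6) == 3
assert euclidean_gcd_recursive(36, 24) == 12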
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : str , a : Optional[Any]=False ) ->Tuple: snake_case = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def __UpperCamelCase ( a : Any , a : Dict , a : Optional[Any]=False ) ->Optional[Any]: for i in range(config.num_hidden_layers ): if base_model: snake_case = '''''' else: snake_case = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[ : config.hidden_size, : ] snake_case = in_proj_bias[: config.hidden_size] snake_case = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case = in_proj_weight[ -config.hidden_size :, : ] snake_case = in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( a : Any ) ->Optional[int]: snake_case = ['''head.weight''', 
'''head.bias'''] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Union[str, Any] , a : List[Any] , a : Dict ) ->List[Any]: snake_case = dct.pop(a ) snake_case = val def __UpperCamelCase ( ) ->int: snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case = Image.open(requests.get(a , stream=a ).raw ) return im @torch.no_grad() def __UpperCamelCase ( a : List[Any] , a : int , a : List[str]=True ) ->str: snake_case = ViTConfig() # patch_size if model_name[-1] == "8": snake_case = 8 # set labels if required if not base_model: snake_case = 1000 snake_case = '''huggingface/label-files''' snake_case = '''imagenet-1k-id2label.json''' snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: snake_case = 384 snake_case = 1536 snake_case = 12 snake_case = 6 # load original model from torch hub snake_case = torch.hub.load('''facebookresearch/dino:main''' , a ) original_model.eval() # load state_dict of original model, remove and rename some keys snake_case = original_model.state_dict() if base_model: remove_classification_head_(a ) snake_case = create_rename_keys(a , base_model=a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_q_k_v(a , a , a ) # load HuggingFace model if base_model: snake_case = ViTModel(a , add_pooling_layer=a ).eval() else: snake_case = ViTForImageClassification(a ).eval() model.load_state_dict(a ) # Check outputs on an image, prepared by ViTImageProcessor snake_case = ViTImageProcessor() snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ) snake_case = encoding['''pixel_values'''] snake_case = model(a ) if base_model: snake_case = original_model(a ) assert torch.allclose(a , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: snake_case = original_model(a ) assert logits.shape == outputs.logits.shape assert torch.allclose(a , outputs.logits , atol=1e-3 ) Path(a ).mkdir(exist_ok=a ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) _lowercase = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
710
'''simple docstring''' import argparse import copy def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple: snake_case = {} with open(a ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[1], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[0], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __UpperCamelCase ( a : Dict , a : Tuple ) ->int: with open(a ) as f: snake_case = f.read(1 ) snake_case = start_node snake_case = [] snake_case = start_node snake_case = 0 while visiting not in first_solution: snake_case = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(a ) and k[0] not in first_solution: snake_case = k[1] snake_case = k[0] first_solution.append(a ) snake_case = distance_of_first_solution + int(a ) snake_case = best_node first_solution.append(a ) snake_case = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def __UpperCamelCase ( a : Optional[int] , a : str ) ->str: snake_case = [] for n in solution[1:-1]: snake_case = solution.index(a ) for kn in solution[1:-1]: snake_case = solution.index(a ) if n == kn: continue snake_case = copy.deepcopy(a ) snake_case = kn snake_case = n snake_case = 0 for k in _tmp[:-1]: snake_case = _tmp[_tmp.index(a ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case = distance + int(i[1] ) _tmp.append(a ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __UpperCamelCase ( a : Any , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] ) ->List[Any]: snake_case = 1 snake_case = first_solution snake_case = [] snake_case = distance_of_first_solution snake_case = solution while count <= iters: snake_case = find_neighborhood(a , a ) snake_case = 0 snake_case = neighborhood[index_of_best_solution] snake_case = len(a ) - 1 snake_case = False while not found: snake_case = 0 while i < len(a ): if best_solution[i] != solution[i]: snake_case = best_solution[i] snake_case = solution[i] break snake_case = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case = True snake_case = best_solution[:-1] snake_case = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case = cost snake_case = solution else: snake_case = index_of_best_solution + 1 snake_case = neighborhood[index_of_best_solution] if len(a ) >= size: tabu_list.pop(0 ) snake_case = count + 1 return best_solution_ever, best_cost def __UpperCamelCase ( a : Union[str, Any]=None ) ->Optional[Any]: snake_case = generate_neighbours(args.File ) snake_case , snake_case = generate_first_solution( args.File , a ) snake_case , snake_case = tabu_search( a , a , a , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" 
) if __name__ == "__main__": _lowercase = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
44
0
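The DINO conversion script above splits timm's fused attention projection into separate query, key and value matrices by slicing along the first axis. A toy illustration of that slicing, with a made-up hidden size:

import numpy as np

hidden_size = 4
# fused qkv weight as stored in the DINO/timm checkpoint: shape (3 * hidden, hidden)
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
value_w = in_proj_weight[-hidden_size:, :]

# the three slices reassemble into the original fused matrix
assert np.array_equal(np.vstack([query_w, key_w, value_w]), in_proj_weight)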
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class _lowercase ( __a ):
    @staticmethod
    @abstractmethod
    def UpperCamelCase ( A__ ) -> Dict:
        raise NotImplementedError()

    @abstractmethod
    def UpperCamelCase ( self ) -> List[str]:
        raise NotImplementedError()
711
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
44
0
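The abstract command class above only pins down the interface: a static `register_subcommand` hook plus an instance-level `run`. A hypothetical toy CLI showing how a concrete subcommand could plug into that pattern; every name below is invented for illustration:

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class HelloCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("hello")
        parser.set_defaults(factory=lambda args: HelloCommand())

    def run(self):
        print("hello from the subcommand")


cli = ArgumentParser("toy-cli")
subparsers = cli.add_subparsers()
HelloCommand.register_subcommand(subparsers)
args = cli.parse_args(["hello"])
args.factory(args).run()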
def __UpperCamelCase ( a : int ) ->list:
    snake_case = int(a )
    if n_element < 1:
        snake_case = ValueError('''a should be a positive number''' )
        raise my_error
    snake_case = [1]
    snake_case , snake_case , snake_case = (0, 0, 0)
    snake_case = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list


if __name__ == "__main__":
    _lowercase = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    _lowercase = hamming(int(n))
    print('-----------------------------------------------------')
    print(f'The list with nth numbers is: {hamming_numbers}')
    print('-----------------------------------------------------')
712
'''simple docstring'''
from ...processing_utils import ProcessorMixin


class _lowercase ( __a ):
    _UpperCAmelCase = '''WhisperFeatureExtractor'''
    _UpperCAmelCase = '''WhisperTokenizer'''

    def __init__( self , A__ , A__ ) -> Optional[Any]:
        super().__init__(A__ , A__ )
        snake_case = self.feature_extractor
        snake_case = False

    def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]:
        return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ )

    def __call__( self , *A__ , **A__ ) -> Dict:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*A__ , **A__ )
        snake_case = kwargs.pop('''audio''' , A__ )
        snake_case = kwargs.pop('''sampling_rate''' , A__ )
        snake_case = kwargs.pop('''text''' , A__ )
        if len(A__ ) > 0:
            snake_case = args[0]
            snake_case = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
        if text is not None:
            snake_case = self.tokenizer(A__ , **A__ )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            snake_case = encodings['''input_ids''']
            return inputs

    def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*A__ , **A__ )

    def UpperCamelCase ( self , *A__ , **A__ ) -> str:
        return self.tokenizer.decode(*A__ , **A__ )

    def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]:
        return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
44
0
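As printed, the Hamming-number file above cannot run: the pointer variables `i`, `j`, `k` and the list name only exist in the pre-obfuscation source. A runnable sketch of the same three-pointer algorithm, assuming the standard formulation:

def hamming(n_element: int) -> list:
    # smallest n_element numbers of the form 2^i * 3^j * 5^k
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i = j = k = 0
    while len(hamming_list) < n_element:
        # advance each pointer past candidates that are no longer larger than the tail
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
    return hamming_list


assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]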
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_lowercase = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
713
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _lowercase ( __a ): _UpperCAmelCase = '''char''' _UpperCAmelCase = '''bpe''' _UpperCAmelCase = '''wp''' _lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _lowercase ( __a ): _UpperCAmelCase = ['''image_processor''', '''char_tokenizer'''] _UpperCAmelCase = '''ViTImageProcessor''' _UpperCAmelCase = '''MgpstrTokenizer''' def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]: snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A__ , ) snake_case = kwargs.pop('''feature_extractor''' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) snake_case = tokenizer snake_case = AutoTokenizer.from_pretrained('''gpt2''' ) snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(A__ , A__ ) def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None: snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ ) if text is None: return inputs elif images is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , A__ ) -> Dict: snake_case , snake_case , snake_case = sequences snake_case = char_preds.size(0 ) snake_case , snake_case = self._decode_helper(A__ , '''char''' ) snake_case , snake_case = self._decode_helper(A__ , '''bpe''' ) snake_case , snake_case = self._decode_helper(A__ , '''wp''' ) snake_case = [] snake_case = [] for i in range(A__ ): snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] snake_case = scores.index(max(A__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) snake_case = {} snake_case = final_strs snake_case = final_scores snake_case = char_strs snake_case = bpe_strs snake_case = wp_strs return out def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]: if format == DecodeType.CHARACTER: snake_case = self.char_decode snake_case = 1 snake_case = '''[s]''' elif format == DecodeType.BPE: snake_case = self.bpe_decode snake_case = 2 snake_case = '''#''' elif format == DecodeType.WORDPIECE: snake_case = self.wp_decode snake_case = 1_02 snake_case = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) snake_case , snake_case = [], [] snake_case = pred_logits.size(0 ) snake_case = pred_logits.size(1 ) snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ ) snake_case = preds_index.view(-1 , A__ )[:, 1:] snake_case = decoder(A__ ) snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 ) snake_case = preds_max_prob[:, 1:] for index in range(A__ ): snake_case = preds_str[index].find(A__ ) snake_case = preds_str[index][:pred_eos] 
snake_case = preds_index[index].cpu().tolist() snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1 snake_case = preds_max_prob[index][: pred_eos_index + 1] snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A__ ) conf_scores.append(A__ ) return dec_strs, conf_scores def UpperCamelCase ( self , A__ ) -> int: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )] return decode_strs def UpperCamelCase ( self , A__ ) -> List[str]: return self.bpe_tokenizer.batch_decode(A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )] return decode_strs
44
0
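The BioGPT `__init__` above defers all heavy imports through `_LazyModule`, keyed by the `_import_structure` mapping. A stand-in sketch of the underlying idea, not transformers' actual implementation, built only on the standard library:

import importlib

_import_structure = {"math": ["sqrt"], "json": ["loads"]}  # submodule -> exported names


def lazy_get(name):
    # import the owning module only when one of its names is first requested
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)


sqrt = lazy_get("sqrt")  # "math" is imported here, not at startup
print(sqrt(9.0))         # 3.0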
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { 'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json', 'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json', 'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json', 'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json', 'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json', 'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json', 'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json', 'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json', 'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json', } class _lowercase ( __a ): _UpperCAmelCase = '''xmod''' def __init__( self , A__=3_05_22 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=2 , A__=0.0_2 , A__=1e-12 , A__=1 , A__=0 , A__=2 , A__="absolute" , A__=True , A__=None , A__=False , A__=2 , A__=False , A__=True , A__=True , A__=("en_XX",) , A__=None , **A__ , ) -> List[Any]: super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout snake_case = pre_norm snake_case = adapter_reduction_factor snake_case = adapter_layer_norm snake_case = adapter_reuse_layer_norm snake_case = ln_before_adapter snake_case = list(A__ ) snake_case = default_language class _lowercase ( __a ): @property def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
714
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowercase , _lowercase , _lowercase = False, False, False @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = "dict" _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a ) def __call__( self ) -> Optional[Any]: return self.pa_type def UpperCamelCase ( self , A__ ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(A__ , A__ ): return {"bytes": None, "path": value} elif isinstance(A__ , A__ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes snake_case = BytesIO() sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 snake_case = BytesIO(bytes() ) sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def UpperCamelCase ( self , A__ , A__ = None ) -> dict: if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: snake_case = token_per_repo_id or {} snake_case = path.split('''::''' )[-1] try: snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id'''] snake_case = token_per_repo_id[repo_id] except (ValueError, KeyError): snake_case = None with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f: snake_case , snake_case = sf.read(A__ ) else: snake_case , snake_case = sf.read(A__ ) snake_case = array.T if self.mono: snake_case = librosa.to_mono(A__ ) if self.sampling_rate and self.sampling_rate != sampling_rate: snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate ) snake_case = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def UpperCamelCase ( self , A__ ) -> pa.StructArray: if pa.types.is_string(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: snake_case = storage.field('''bytes''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: snake_case = storage.field('''path''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(A__ , self.pa_type ) def UpperCamelCase ( self , A__ ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A__ ): with xopen(A__ , '''rb''' ) as f: snake_case = f.read() return bytes_ snake_case = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else 
x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) snake_case = pa.array( [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(A__ , self.pa_type )
44
0
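The `Audio` feature above decodes raw ".pcm" files by reading int16 samples and dividing by 32767. A tiny sketch of that normalization step:

import numpy as np

# raw 16-bit PCM samples, as np.frombuffer/np.memmap would return them
pcm = np.array([0, 16384, 32767, -32768], dtype=np.int16)
audio = pcm.astype(np.float32) / 32767  # same scaling as the decode path above
print(audio.min(), audio.max())         # roughly -1.0 .. 1.0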
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
715
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _lowercase : @staticmethod def UpperCamelCase ( *A__ , **A__ ) -> List[Any]: pass def __UpperCamelCase ( a : Image ) ->str: snake_case = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _lowercase ( unittest.TestCase ): _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCamelCase ( self , A__ , A__ ) -> List[Any]: snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ ) import datasets snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , A__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def UpperCamelCase ( self ) -> Optional[Any]: pass @slow @require_torch def UpperCamelCase ( self ) -> Dict: snake_case = '''Intel/dpt-large''' snake_case = pipeline('''depth-estimation''' , model=A__ ) snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) snake_case = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 ) @require_torch def UpperCamelCase ( self ) -> Any: # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
44
0
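The slow test above drives DPT through the `depth-estimation` pipeline. A minimal sketch of that usage, assuming network access and the `timm` backend installed:

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

print(type(outputs["depth"]))            # PIL.Image rendering of the depth map
print(outputs["predicted_depth"].shape)  # raw torch.Tensor of per-pixel depth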
'''simple docstring'''
def __UpperCamelCase ( a : int , a : int ) ->int:
    while b:
        snake_case , snake_case = b, a % b
    return a


def __UpperCamelCase ( a : int , a : int ) ->int:
    return a if b == 0 else euclidean_gcd_recursive(a , a % b )


def __UpperCamelCase ( ) ->Optional[Any]:
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )


if __name__ == "__main__":
    main()
716
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __UpperCamelCase ( a : Optional[int] ) ->Dict: snake_case = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Optional[Any] ) ->int: snake_case = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case = s_dict.pop(a ) elif "subsample" in key: snake_case = s_dict.pop(a ) def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]: snake_case , snake_case = emb.weight.shape snake_case = nn.Linear(a , a , bias=a ) snake_case = emb.weight.data return lin_layer def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple: snake_case = torch.load(a , map_location='''cpu''' ) snake_case = mam_aaa['''args'''] snake_case = mam_aaa['''model'''] snake_case = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(a ) rename_keys(a ) snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0] snake_case = args.share_decoder_input_output_embed snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )] snake_case = SpeechaTextConfig( vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , ) snake_case = SpeechaTextForConditionalGeneration(a ) snake_case , snake_case = model.model.load_state_dict(a , strict=a ) if len(a ) > 0 and not set(a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f""" but all the following weights are missing {missing}""" ) if tie_embeds: snake_case = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case = lm_head_weights model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
44
0
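`make_linear_from_emb` in the conversion script above ties the decoder's output projection to its input embedding matrix. A small sketch of that weight-sharing trick with toy dimensions:

import torch
from torch import nn

emb = nn.Embedding(100, 16)               # vocab_size=100, d_model=16
lm_head = nn.Linear(16, 100, bias=False)  # projects d_model back to the vocab
lm_head.weight.data = emb.weight.data     # share input/output embeddings

tokens = torch.tensor([[1, 2, 3]])
logits = lm_head(emb(tokens))
print(logits.shape)                       # torch.Size([1, 3, 100])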
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast

_lowercase = datasets.utils.logging.get_logger(__name__)


@dataclass
class _lowercase ( datasets.BuilderConfig ):
    _UpperCAmelCase = 10_000
    _UpperCAmelCase = None
    _UpperCAmelCase = None


class _lowercase ( datasets.ArrowBasedBuilder ):
    _UpperCAmelCase = ParquetConfig

    def UpperCamelCase ( self ) -> Tuple:
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCamelCase ( self , A__ ) -> str:
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        snake_case = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(A__ , (str, list, tuple) ):
            snake_case = data_files
            if isinstance(A__ , A__ ):
                snake_case = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            snake_case = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        snake_case = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                snake_case = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            snake_case = [dl_manager.iter_files(A__ ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(A__ ):
                    with open(A__ , '''rb''' ) as f:
                        snake_case = datasets.Features.from_arrow_schema(pq.read_schema(A__ ) )
                    break
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={'''files''': files} ) )
        return splits

    def UpperCamelCase ( self , A__ ) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            snake_case = table_cast(A__ , self.info.features.arrow_schema )
        return pa_table

    def UpperCamelCase ( self , A__ ) -> Optional[int]:
        snake_case = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"""
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            with open(A__ , '''rb''' ) as f:
                snake_case = pq.ParquetFile(A__ )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns )
                    ):
                        snake_case = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F"""{file_idx}_{batch_idx}""", self._cast_table(A__ )
                except ValueError as e:
                    logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                    raise
717
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class _lowercase ( metaclass=__a ):
    _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq''']

    def __init__( self , *A__ , **A__ ) -> Union[str, Any]:
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def UpperCamelCase ( cls , *A__ , **A__ ) -> Optional[Any]:
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def UpperCamelCase ( cls , *A__ , **A__ ) -> Any:
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
44
0
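The Parquet builder above streams each file in record batches of `batch_size` rows rather than loading it whole. A self-contained sketch of that read loop, writing a throwaway file into the working directory:

import pyarrow as pa
import pyarrow.parquet as pq

# write a tiny file, then stream it back in batches as the builder does
table = pa.table({"text": ["a", "b", "c", "d"], "label": [0, 1, 0, 1]})
pq.write_table(table, "tiny.parquet")

parquet_file = pq.ParquetFile("tiny.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=2)):
    pa_table = pa.Table.from_batches([record_batch])
    print(batch_idx, pa_table.num_rows)  # two batches of 2 rows each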
'''simple docstring'''
from __future__ import annotations


def __UpperCamelCase ( a : list ) ->list:
    if len(a ) == 0:
        return []
    snake_case , snake_case = min(a ), max(a )
    snake_case = int(max_value - min_value ) + 1
    snake_case = [[] for _ in range(a )]
    for i in my_list:
        buckets[int(i - min_value )].append(a )
    return [v for bucket in buckets for v in sorted(a )]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
718
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator


class _lowercase :
    def __init__( self , A__ ) -> None:
        snake_case = value
        snake_case = None
        snake_case = None


class _lowercase :
    def __init__( self , A__ ) -> None:
        snake_case = tree

    def UpperCamelCase ( self , A__ ) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self ) -> Iterator[int]:
        yield self.depth_first_search(self.tree )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
44
0
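The bucket-sort file above relies on pre-obfuscation names (`my_list`, `buckets`, `max_value`), so it cannot run as printed. A de-obfuscated sketch that matches its assertions, using one integer-wide bucket per value:

def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets = [[] for _ in range(bucket_count)]
    for value in my_list:
        buckets[int(value - min_value)].append(value)  # place each value in its bucket
    return [v for bucket in buckets for v in sorted(bucket)]


assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]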
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _lowercase = '\\n\n' _lowercase = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n' _lowercase = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): def UpperCamelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def UpperCamelCase ( self , A__ , A__ , A__ = 16 , A__ = True , A__=None ) -> Optional[Any]: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": snake_case = '''cuda''' else: snake_case = '''cuda''' if torch.cuda.is_available() else '''cpu''' snake_case = AutoModelForCausalLM.from_pretrained(A__ ) snake_case = model.to(A__ ) snake_case = AutoTokenizer.from_pretrained(A__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: snake_case = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(A__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" snake_case = model.config.max_length - 1 else: snake_case = model.config.max_length snake_case = tokenizer( A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , return_tensors='''pt''' , return_attention_mask=A__ , ).to(A__ ) snake_case = encodings['''input_ids'''] snake_case = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." snake_case = [] snake_case = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(A__ ) , A__ ) ): snake_case = min(start_index + batch_size , len(A__ ) ) snake_case = encoded_texts[start_index:end_index] snake_case = attn_masks[start_index:end_index] if add_start_token: snake_case = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A__ ) snake_case = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) snake_case = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(A__ ), attn_mask] , dim=1 ) snake_case = encoded_batch with torch.no_grad(): snake_case = model(A__ , attention_mask=A__ ).logits snake_case = out_logits[..., :-1, :].contiguous() snake_case = labels[..., 1:].contiguous() snake_case = attn_mask[..., 1:].contiguous() snake_case = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , A__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(A__ )}
719
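The perplexity metric in the code cell above boils down to a masked, per-sequence cross-entropy that is then exponentiated. A compact sketch of that core step, assuming the dump's digit-mangled `torch.expa` stands for a plain exponential:

import torch
from torch.nn import CrossEntropyLoss


def batch_perplexity(logits: torch.Tensor, labels: torch.Tensor, attn_mask: torch.Tensor) -> torch.Tensor:
    # logits: (batch, seq, vocab); labels and attn_mask: (batch, seq).
    loss_fct = CrossEntropyLoss(reduction="none")
    shift_logits = logits[..., :-1, :].contiguous()   # position t predicts token t+1
    shift_labels = labels[..., 1:].contiguous()
    shift_mask = attn_mask[..., 1:].contiguous()
    # Per-token NLL, zeroed on padding, averaged per sequence, then exponentiated.
    nll = loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_mask
    return torch.exp(nll.sum(1) / shift_mask.sum(1))


# Runs end to end on random inputs:
# batch_perplexity(torch.randn(2, 5, 10), torch.randint(0, 10, (2, 5)), torch.ones(2, 5))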
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _lowercase = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __UpperCamelCase ( a : List[str] ) ->Optional[int]: snake_case = torch.load(a , map_location='''cpu''' ) return sd def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] , a : int=rename_keys_prefix ) ->Tuple: snake_case = OrderedDict() snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue snake_case = key for name_pair in rename_keys_prefix: snake_case = new_key.replace(name_pair[0] , name_pair[1] ) snake_case = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately snake_case = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: snake_case = '''pretraining''' if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "nlvr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} snake_case = '''multichoice''' elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} snake_case = '''vqa_advanced''' elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129} snake_case = '''vqa''' elif "nlvr" in checkpoint_path: snake_case = { '''visual_embedding_dim''': 1024, '''num_labels''': 2, } snake_case = '''nlvr''' snake_case = VisualBertConfig(**a ) # Load State Dict snake_case = load_state_dict(a ) snake_case = get_new_dict(a , a ) if model_type == "pretraining": snake_case = VisualBertForPreTraining(a ) elif model_type == "vqa": snake_case = VisualBertForQuestionAnswering(a ) elif model_type == "nlvr": snake_case = VisualBertForVisualReasoning(a ) elif model_type == "multichoice": snake_case = VisualBertForMultipleChoice(a ) model.load_state_dict(a ) # Save Checkpoints Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') 
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
44
0
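The VisualBERT conversion cell above renames checkpoint keys by running a list of (old, new) prefix substitutions over the state dict. A self-contained sketch of that pattern:

from collections import OrderedDict


def rename_state_dict(sd: dict, rename_pairs: list) -> OrderedDict:
    # Apply every (old, new) substring replacement to each key; values are kept as-is.
    new_sd = OrderedDict()
    for key, value in sd.items():
        new_key = key
        for old, new in rename_pairs:
            new_key = new_key.replace(old, new)
        new_sd[new_key] = value
    return new_sd


sd = {"bert.bert.embeddings.weight": 0, "bert.cls.predictions.bias": 1}
renamed = rename_state_dict(sd, [("bert.bert", "visual_bert"), ("bert.cls", "cls")])
assert "visual_bert.embeddings.weight" in renamed and "cls.predictions.bias" in renamed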
'''simple docstring''' import math import sys def __UpperCamelCase ( a : str ) ->str: snake_case = '''''' try: with open(a , '''rb''' ) as binary_file: snake_case = binary_file.read() for dat in data: snake_case = f"""{dat:08b}""" result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def __UpperCamelCase ( a : str ) ->str: snake_case = {'''0''': '''0''', '''1''': '''1'''} snake_case , snake_case = '''''', '''''' snake_case = len(a ) for i in range(len(a ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue snake_case = lexicon[curr_string] result += last_match_id snake_case = last_match_id + '''0''' if math.loga(a ).is_integer(): snake_case = {} for curr_key in list(a ): snake_case = lexicon.pop(a ) snake_case = new_lex snake_case = last_match_id + '''1''' index += 1 snake_case = '''''' return result def __UpperCamelCase ( a : str , a : str ) ->None: snake_case = 8 try: with open(a , '''wb''' ) as opened_file: snake_case = [ to_write[i : i + byte_length] for i in range(0 , len(a ) , a ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(a , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def __UpperCamelCase ( a : str ) ->str: snake_case = 0 for letter in data_bits: if letter == "1": break counter += 1 snake_case = data_bits[counter:] snake_case = data_bits[counter + 1 :] return data_bits def __UpperCamelCase ( a : str , a : str ) ->None: snake_case = read_file_binary(a ) snake_case = remove_prefix(a ) snake_case = decompress_data(a ) write_file_binary(a , a ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
720
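The decompression cell above first turns the file's bytes into one long bit string and then strips a prefix up to the first set bit. A short sketch of those two helpers:

def bytes_to_bits(data: bytes) -> str:
    # Every byte becomes a fixed-width, zero-padded 8-bit string.
    return "".join(f"{byte:08b}" for byte in data)


def strip_prefix(bits: str) -> str:
    # Drop everything up to and including the first '1', mirroring the
    # `remove_prefix` step above (raises ValueError if no '1' exists).
    return bits[bits.index("1") + 1 :]


assert bytes_to_bits(b"\x05") == "00000101"
assert strip_prefix("0001011") == "011"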
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]: snake_case = original_name.split('''.''' )[0] snake_case = key.split('''.''' ) snake_case = int(key_list[key_list.index(a ) - 2] ) snake_case = int(key_list[key_list.index(a ) - 1] ) snake_case = orig_block_num - offset snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def __UpperCamelCase ( a : Tuple ) ->Dict: snake_case = OrderedDict() snake_case , snake_case = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): snake_case = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 snake_case = key[: key.find('''proj''' )] snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" ) snake_case = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: snake_case = '''poolformer.encoder.''' + key if "mlp.fc1" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' ) if "norm2" in key: snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: snake_case = key.replace('''head''' , '''classifier''' ) snake_case = value return new_state_dict def __UpperCamelCase ( ) ->Optional[int]: snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case = Image.open(requests.get(a , stream=a ).raw ) return image @torch.no_grad() def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]: snake_case = PoolFormerConfig() # set attributes based on model_name snake_case = '''huggingface/label-files''' snake_case = model_name[-3:] snake_case = 1000 snake_case = '''imagenet-1k-id2label.json''' snake_case = (1, 1000) # set config attributes snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} if size == "s12": snake_case = [2, 2, 6, 2] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s24": snake_case = [4, 4, 12, 4] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s36": snake_case = [6, 6, 18, 6] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.9 elif size == "m36": snake_case = [6, 6, 18, 6] snake_case = [96, 192, 384, 768] snake_case = 4.0 
snake_case = 1e-6 snake_case = 0.95 elif size == "m48": snake_case = [8, 8, 24, 8] snake_case = [96, 192, 384, 768] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.95 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) # Prepare image snake_case = prepare_img() snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict snake_case = torch.load(a , map_location=torch.device('''cpu''' ) ) # rename keys snake_case = rename_keys(a ) # create HuggingFace model and load state dict snake_case = PoolFormerForImageClassification(a ) model.load_state_dict(a ) model.eval() # Define image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass snake_case = model(a ) snake_case = outputs.logits # define expected logit slices for different models if size == "s12": snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": snake_case = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": snake_case = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": snake_case = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a , atol=1e-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) _lowercase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
44
0
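The PoolFormer conversion cell above renumbers encoder blocks: it locates the block and layer indices sitting just before a known sub-name in the dotted key, then shifts the block index by an offset accumulated from patch-embedding layers. A sketch, with a hypothetical key layout for the demo:

def renumber_block(key: str, orig_name: str, new_name: str, offset: int) -> str:
    parts = key.split(".")
    anchor = parts.index(orig_name.split(".")[0])
    orig_block, layer = int(parts[anchor - 2]), int(parts[anchor - 1])
    return key.replace(
        f"{orig_block}.{layer}.{orig_name}",
        f"block.{orig_block - offset}.{layer}.{new_name}",
    )


assert (
    renumber_block("encoder.2.1.mlp.fc1.weight", "mlp.fc1", "output.conv1", offset=1)
    == "encoder.block.1.1.output.conv1.weight"
)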
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _lowercase = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
721
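The FNet `__init__` cell above wires an `_import_structure` into transformers' `_LazyModule` so that heavy submodules load only on first attribute access. A generic, stdlib-only sketch of that idea (this is not the actual transformers class):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, attr_to_module: dict) -> None:
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr: str):
        # Import the backing module only when the attribute is first touched.
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ fires once per name
        return value


lazy = LazyModule("demo", {"sqrt": "math", "dumps": "json"})
assert lazy.sqrt(9.0) == 3.0 and lazy.dumps([1, 2]) == "[1, 2]"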
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowercase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowercase = logging.getLogger() def __UpperCamelCase ( ) ->Tuple: snake_case = argparse.ArgumentParser() parser.add_argument('''-f''' ) snake_case = parser.parse_args() return args.f def __UpperCamelCase ( a : Dict , a : Tuple="eval" ) ->List[Any]: snake_case = os.path.join(a , f"""{split}_results.json""" ) if os.path.exists(a ): with open(a , '''r''' ) as f: return json.load(a ) raise ValueError(f"""can't find {path}""" ) _lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _lowercase ( __a ): def UpperCamelCase ( self ) -> List[str]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_glue.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) @slow def UpperCamelCase ( self ) -> List[Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_clm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 1_00 ) @slow def UpperCamelCase ( self ) -> int: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A__ , '''argv''' , A__ ): run_summarization_flax.main() snake_case = get_results(A__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt 
--validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_mlm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def UpperCamelCase ( self ) -> Dict: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_ta_mlm_flax.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 ) @slow def UpperCamelCase ( self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case = 7 if get_gpu_count() > 1 else 2 snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_ner.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def UpperCamelCase ( self ) -> Any: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_qa.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
44
0
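Each test in the Flax-examples cell above drives an example script by temporarily replacing `sys.argv` and calling the script's `main()`. The pattern in isolation (the `run_flax_glue` reference in the comment comes from the row; this sketch does not import it):

import sys
from unittest.mock import patch


def run_script_main(main_fn, argv: list) -> None:
    # Swap in a synthetic command line for the duration of the call.
    with patch.object(sys, "argv", argv):
        main_fn()


# e.g. run_script_main(run_flax_glue.main, ["run_glue.py", "--output_dir", "/tmp/out"])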
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def __UpperCamelCase ( a : np.ndarray , a : np.ndarray ) ->float: return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(a , a ) ) ) def __UpperCamelCase ( a : np.ndarray , a : np.ndarray ) ->list[list[list[float] | float]]: if dataset.ndim != value_array.ndim: snake_case = ( '''Wrong input data\'s dimensions... ''' f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}""" ) raise ValueError(a ) try: if dataset.shape[1] != value_array.shape[1]: snake_case = ( '''Wrong input data\'s shape... ''' f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}""" ) raise ValueError(a ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('''Wrong shape''' ) if dataset.dtype != value_array.dtype: snake_case = ( '''Input data have different datatype... ''' f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}""" ) raise TypeError(a ) snake_case = [] for value in value_array: snake_case = euclidean(a , dataset[0] ) snake_case = dataset[0].tolist() for dataset_value in dataset[1:]: snake_case = euclidean(a , a ) if dist > temp_dist: snake_case = temp_dist snake_case = dataset_value.tolist() answer.append([vector, dist] ) return answer def __UpperCamelCase ( a : np.ndarray , a : np.ndarray ) ->float: return np.dot(a , a ) / (norm(a ) * norm(a )) if __name__ == "__main__": import doctest doctest.testmod()
700
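The similarity-search cell above performs a brute-force nearest-neighbour scan under the Euclidean metric (and also defines cosine similarity). The same scan, reduced to its essentials:

import math


def euclidean(a: list, b: list) -> float:
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))


def nearest(dataset: list, query: list) -> tuple:
    # Keep the row with the smallest distance to the query.
    best = min(dataset, key=lambda row: euclidean(row, query))
    return best, euclidean(best, query)


vec, dist = nearest([[0.0, 0.0], [3.0, 4.0]], [1.0, 1.0])
assert vec == [0.0, 0.0] and abs(dist - math.sqrt(2)) < 1e-9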
'''simple docstring''' from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS _lowercase = logging.get_logger(__name__) _lowercase = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class _lowercase ( __a ): def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]: super().__init__(*A__ , **A__ ) if config is None: assert isinstance(self.model , A__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) snake_case = self.model.config else: snake_case = config snake_case = data_args snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss snake_case = label_smoothed_nll_loss def UpperCamelCase ( self , A__ ) -> Tuple: if self.optimizer is None: snake_case = ['''bias''', '''LayerNorm.weight'''] snake_case = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] snake_case = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: snake_case = Adafactor snake_case = {'''scale_parameter''': False, '''relative_step''': False} else: snake_case = AdamW snake_case = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } snake_case = self.args.learning_rate if self.sharded_ddp: snake_case = OSS( params=A__ , optim=A__ , **A__ , ) else: snake_case = optimizer_cls(A__ , **A__ ) if self.lr_scheduler is None: snake_case = self._get_lr_scheduler(A__ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": snake_case = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: snake_case = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ ) return scheduler def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token snake_case = model(**A__ , use_cache=A__ )[0] snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2] else: # compute label smoothed loss snake_case = model(**A__ , use_cache=A__ )[0] snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 ) snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def UpperCamelCase ( self , A__ , A__ ) -> Any: snake_case = inputs.pop('''labels''' ) snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) return loss def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: snake_case = self._prepare_inputs(A__ ) snake_case = { '''max_length''': self.data_args.val_max_target_length if 
self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: snake_case = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) snake_case = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) snake_case = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) snake_case = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def UpperCamelCase ( self , A__ , A__ ) -> List[str]: # If PAD token is not defined at least EOS token has to be defined snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F""" padded to `max_length`={max_length}""" ) snake_case = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) snake_case = tensor return padded_tensor
44
0
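The custom trainer cell above builds two optimizer parameter groups so that biases and LayerNorm weights are exempt from weight decay. That grouping on its own, runnable against any `nn.Module`:

import torch.nn as nn


def weight_decay_groups(model: nn.Module, weight_decay: float) -> list:
    no_decay = ("bias", "LayerNorm.weight")
    return [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]


groups = weight_decay_groups(nn.Linear(4, 2), 0.01)
assert len(groups[1]["params"]) == 1  # only the bias is decay-exempt here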
'''simple docstring''' import math import unittest def __UpperCamelCase ( a : int ) ->bool: assert isinstance(a , a ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self ) -> List[Any]: self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def UpperCamelCase ( self ) -> str: with self.assertRaises(A__ ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , ) self.assertFalse( is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
701
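The primality cell above is trial division accelerated by the fact that every prime greater than 3 has the form 6k ± 1. De-obfuscated, with a quick self-check:

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:          # 2 and 3
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


assert [n for n in range(30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]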
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def __UpperCamelCase ( a : List[str] ) ->str: snake_case = [] for line in lines: snake_case = re.sub(R'''#.*''' , '''''' , a ) # remove comments if line: filtered_lines.append(a ) snake_case = '''\n'''.join(a ) # Make a hash from all this code snake_case = full_str.encode('''utf-8''' ) return shaaaa(a ).hexdigest() # get importable module names and hash for caching _lowercase = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions _lowercase = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _lowercase = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name _lowercase = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
44
0
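The packaged-modules cell above fingerprints each dataset loader by hashing its source with comments and blank lines stripped, so purely cosmetic edits don't invalidate caches. A sketch, assuming the dump's digit-mangled `shaaaa` stands for `sha256`:

import re
from hashlib import sha256


def hash_python_lines(lines: list) -> str:
    # Remove comments, drop empty lines, then hash what's left.
    filtered = [re.sub(r"#.*", "", line) for line in lines]
    source = "\n".join(line for line in filtered if line)
    return sha256(source.encode("utf-8")).hexdigest()


assert hash_python_lines(["x = 1  # note", "", "y = 2"]) == hash_python_lines(["x = 1  ", "y = 2"])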
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = {'vocab_file': 'vocab.txt'} _lowercase = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } _lowercase = { 'openbmb/cpm-ant-10b': 1_024, } def __UpperCamelCase ( a : Tuple ) ->List[Any]: snake_case = collections.OrderedDict() with open(a , '''r''' , encoding='''utf-8''' ) as reader: snake_case = reader.readlines() for index, token in enumerate(a ): snake_case = token.rstrip('''\n''' ) snake_case = index return vocab class _lowercase ( __a ): def __init__( self , A__ , A__="<unk>" , A__=2_00 ): snake_case = vocab snake_case = unk_token snake_case = max_input_chars_per_word def UpperCamelCase ( self , A__ ): snake_case = list(A__ ) if len(A__ ) > self.max_input_chars_per_word: return [self.unk_token] snake_case = 0 snake_case = [] while start < len(A__ ): snake_case = len(A__ ) snake_case = None while start < end: snake_case = ''''''.join(chars[start:end] ) if substr in self.vocab: snake_case = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(A__ ) snake_case = end return sub_tokens class _lowercase ( __a ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase = False def __init__( self , A__ , A__="<d>" , A__="</d>" , A__="<s>" , A__="</s>" , A__="<pad>" , A__="<unk>" , A__="</n>" , A__="</_>" , A__="left" , **A__ , ): requires_backends(self , ['''jieba'''] ) super().__init__( bod_token=A__ , eod_token=A__ , bos_token=A__ , eos_token=A__ , pad_token=A__ , unk_token=A__ , line_token=A__ , space_token=A__ , padding_side=A__ , **A__ , ) snake_case = bod_token snake_case = eod_token snake_case = load_vocab(A__ ) snake_case = self.encoder[space_token] snake_case = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A__ : x[1] ) ) snake_case = {v: k for k, v in self.encoder.items()} snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def UpperCamelCase ( self ): return self.encoder[self.bod_token] @property def UpperCamelCase ( self ): return self.encoder[self.eod_token] @property def UpperCamelCase ( self ): return self.encoder["\n"] @property def UpperCamelCase ( self ): return len(self.encoder ) def UpperCamelCase ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def UpperCamelCase ( self , A__ ): snake_case = [] for x in jieba.cut(A__ , cut_all=A__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(A__ ) ) return output_tokens def UpperCamelCase ( self , A__ , **A__ ): snake_case = [i for i in token_ids if i >= 0] snake_case = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(A__ , **A__ ) def UpperCamelCase ( self , A__ ): return token in self.encoder def UpperCamelCase ( self , A__ ): return "".join(A__ ) def UpperCamelCase ( self , A__ ): return self.encoder.get(A__ , self.encoder.get(self.unk_token ) ) def UpperCamelCase ( self , 
A__ ): return self.decoder.get(A__ , self.unk_token ) def UpperCamelCase ( self , A__ , A__ = None ): if os.path.isdir(A__ ): snake_case = os.path.join( A__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: snake_case = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory snake_case = 0 if " " in self.encoder: snake_case = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: snake_case = self.encoder['''\n'''] del self.encoder["\n"] snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A__ : x[1] ) ) with open(A__ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) snake_case = token_index writer.write(token + '''\n''' ) index += 1 return (vocab_file,) def UpperCamelCase ( self , A__ , A__ = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def UpperCamelCase ( self , A__ , A__ = None , A__ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ ) if token_ids_a is not None: return [1] + ([0] * len(A__ )) + [1] + ([0] * len(A__ )) return [1] + ([0] * len(A__ ))
702
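The CPM-Ant tokenizer cell above contains a WordPiece-style routine that, at each position, greedily takes the longest vocabulary entry and falls back to an unknown token over a single character. The matching loop in isolation:

def greedy_longest_match(text: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1
        if end == start:            # nothing matched: emit <unk>, advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens


assert greedy_longest_match("unhappy", {"un", "unhap", "hap", "py"}) == ["unhap", "py"]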
'''simple docstring''' _lowercase = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
44
0
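The cell above is diffusers' pin table mapping bare package names to requirement specifiers. A sketch of how such a table is typically consumed by setup tooling (`deps_list` is an assumption modeled on that tooling, not code from the row):

deps = {
    "torch": "torch>=1.4",
    "numpy": "numpy",
    "transformers": "transformers>=4.25.1",
}


def deps_list(*pkgs: str) -> list:
    # Resolve bare package names to their pinned specifiers.
    return [deps[pkg] for pkg in pkgs]


assert deps_list("torch", "transformers") == ["torch>=1.4", "transformers>=4.25.1"]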
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowercase ( __a ): _UpperCAmelCase = ['''image_processor''', '''tokenizer'''] _UpperCAmelCase = '''ViltImageProcessor''' _UpperCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , A__=None , A__=None , **A__ ) -> Union[str, Any]: snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A__ , ) snake_case = kwargs.pop('''feature_extractor''' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(A__ , A__ ) snake_case = self.image_processor def __call__( self , A__ , A__ = None , A__ = True , A__ = False , A__ = None , A__ = None , A__ = 0 , A__ = None , A__ = None , A__ = None , A__ = False , A__ = False , A__ = False , A__ = False , A__ = True , A__ = None , **A__ , ) -> BatchEncoding: snake_case = self.tokenizer( text=A__ , add_special_tokens=A__ , padding=A__ , truncation=A__ , max_length=A__ , stride=A__ , pad_to_multiple_of=A__ , return_token_type_ids=A__ , return_attention_mask=A__ , return_overflowing_tokens=A__ , return_special_tokens_mask=A__ , return_offsets_mapping=A__ , return_length=A__ , verbose=A__ , return_tensors=A__ , **A__ , ) # add pixel_values + pixel_mask snake_case = self.image_processor(A__ , return_tensors=A__ ) encoding.update(A__ ) return encoding def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[int]: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> Union[str, Any]: return self.tokenizer.decode(*A__ , **A__ ) @property def UpperCamelCase ( self ) -> int: snake_case = self.tokenizer.model_input_names snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase ( self ) -> Tuple: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A__ , ) return self.image_processor_class @property def UpperCamelCase ( self ) -> Optional[int]: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A__ , ) return self.image_processor
703
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = IFInpaintingSuperResolutionPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase ( self ) -> int: return self._get_superresolution_dummy_components() def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]: if str(A__ ).startswith('''mps''' ): snake_case = torch.manual_seed(A__ ) else: snake_case = torch.Generator(device=A__ ).manual_seed(A__ ) snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCamelCase ( self ) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCamelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_local() def UpperCamelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
44
0
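The pipeline-test cell above seeds randomness per device, special-casing Apple's MPS backend, where the generator must live on the CPU. That helper on its own:

import torch


def seeded_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)          # default CPU generator
    return torch.Generator(device=device).manual_seed(seed)


gen = seeded_generator("cpu", seed=42)
assert isinstance(gen, torch.Generator)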
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": _lowercase = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') _lowercase = parser.parse_args() if args.model_type == "bert": _lowercase = BertForMaskedLM.from_pretrained(args.model_name) _lowercase = 'bert' else: raise ValueError('args.model_type should be "bert".') _lowercase = model.state_dict() _lowercase = {} for w in ["word_embeddings", "position_embeddings"]: _lowercase = state_dict[f'{prefix}.embeddings.{w}.weight'] for w in ["weight", "bias"]: _lowercase = state_dict[f'{prefix}.embeddings.LayerNorm.{w}'] _lowercase = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}' ] _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}' ] _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}' ] _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}' ] _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}' ] _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}' ] _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}' ] _lowercase = state_dict[ f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}' ] std_idx += 1 _lowercase = state_dict['cls.predictions.decoder.weight'] _lowercase = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: _lowercase = state_dict[f'cls.predictions.transform.dense.{w}'] _lowercase = state_dict[f'cls.predictions.transform.LayerNorm.{w}'] print(f'N layers selected for distillation: {std_idx}') print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}') print(f'Save transferred checkpoint to {args.dump_checkpoint}.') torch.save(compressed_sd, args.dump_checkpoint)
704
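The distillation cell above initializes a student model by copying a fixed subset of teacher layers (0, 2, 4, 7, 9, 11) into consecutively renumbered student slots. A generic sketch of that key remapping (the key layout is illustrative):

def select_teacher_layers(state_dict: dict, prefix: str, layer_ids=(0, 2, 4, 7, 9, 11)) -> dict:
    student_sd = {}
    for student_idx, teacher_idx in enumerate(layer_ids):
        src = f"{prefix}.encoder.layer.{teacher_idx}."
        dst = f"{prefix}.encoder.layer.{student_idx}."
        for key, value in state_dict.items():
            if key.startswith(src):
                student_sd[dst + key[len(src):]] = value
    return student_sd


sd = {"bert.encoder.layer.2.attention.self.query.weight": "w"}
assert "bert.encoder.layer.1.attention.self.query.weight" in select_teacher_layers(sd, "bert")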
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _lowercase = logging.get_logger(__name__) class _lowercase ( __a ): def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: snake_case = feature_size snake_case = sampling_rate snake_case = padding_value snake_case = kwargs.pop('''padding_side''' , '''right''' ) snake_case = kwargs.pop('''return_attention_mask''' , A__ ) super().__init__(**A__ ) def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): snake_case = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) snake_case = processed_features[self.model_input_names[0]] snake_case = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(A__ ) == 0: if return_attention_mask: snake_case = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch snake_case = required_input[0] if isinstance(A__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. snake_case = 0 while len(required_input[index] ) == 0: index += 1 if index < len(A__ ): snake_case = required_input[index][0] if return_tensors is None: if is_tf_tensor(A__ ): snake_case = '''tf''' elif is_torch_tensor(A__ ): snake_case = '''pt''' elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ): snake_case = '''np''' else: raise ValueError( F"""type of {first_element} unknown: {type(A__ )}. 
""" '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): snake_case = to_numpy(A__ ) else: snake_case = [to_numpy(A__ ) for v in value] # Convert padding_strategy in PaddingStrategy snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ ) snake_case = processed_features[self.model_input_names[0]] snake_case = len(A__ ) if not all(len(A__ ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) snake_case = [] for i in range(A__ ): snake_case = {k: v[i] for k, v in processed_features.items()} # truncation snake_case = self._truncate( A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , ) truncated_inputs.append(A__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) snake_case = PaddingStrategy.MAX_LENGTH snake_case = {} for i in range(A__ ): # padding snake_case = self._pad( truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , ) for key, value in outputs.items(): if key not in batch_outputs: snake_case = [] if value.dtype is np.dtype(np.floataa ): snake_case = value.astype(np.floataa ) batch_outputs[key].append(A__ ) return BatchFeature(A__ , tensor_type=A__ ) def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict: snake_case = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: snake_case = len(A__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: snake_case = np.ones(len(A__ ) , dtype=np.intaa ) if needs_to_be_padded: snake_case = max_length - len(A__ ) if self.padding_side == "right": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (0, difference) ) snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) snake_case = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * 
pad_to_multiple_of snake_case = len(A__ ) > max_length if needs_to_be_truncated: snake_case = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: snake_case = processed_features['''attention_mask'''][:max_length] return processed_features def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]: # Get padding strategy if padding is not False: if padding is True: snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(A__ , A__ ): snake_case = PaddingStrategy(A__ ) elif isinstance(A__ , A__ ): snake_case = padding else: snake_case = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
44
0
'''simple docstring'''


def check_bouncy(num: int) -> bool:
    if not isinstance(num, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_num = str(num)
    sorted_str_num = "".join(sorted(str_num))
    # bouncy: digits are neither in non-decreasing nor non-increasing order
    return sorted_str_num != str_num and sorted_str_num[::-1] != str_num


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values strictly between 0 and 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
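A few hand-checked cases of the bouncy predicate above (a number is bouncy when its digits are neither entirely non-decreasing nor entirely non-increasing):

assert check_bouncy(101)         # digits go down, then up
assert not check_bouncy(134468)  # non-decreasing digits
assert not check_bouncy(66420)   # non-increasing digits
assert check_bouncy(155349)      # up, down, up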
705
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowercase ( yaml.SafeLoader ): def UpperCamelCase ( self , A__ ) -> List[str]: snake_case = [self.constructed_objects[key_node] for key_node, _ in node.value] snake_case = [tuple(A__ ) if isinstance(A__ , A__ ) else key for key in keys] snake_case = Counter(A__ ) snake_case = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def UpperCamelCase ( self , A__ , A__=False ) -> List[Any]: snake_case = super().construct_mapping(A__ , deep=A__ ) self._check_no_duplicates_on_constructed_node(A__ ) return mapping def __UpperCamelCase ( a : str ) ->Tuple[Optional[str], str]: snake_case = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: snake_case = full_content[1:].index('''---''' ) + 1 snake_case = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(a ) class _lowercase ( __a ): # class attributes _UpperCAmelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case , snake_case = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(A__ ) else: return cls() def UpperCamelCase ( self , A__ ) -> str: if path.exists(): with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case = readme_file.read() else: snake_case = None snake_case = self._to_readme(A__ ) with open(A__ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(A__ ) def UpperCamelCase ( self , A__ = None ) -> str: if readme_content is not None: snake_case , snake_case = _split_yaml_from_readme(A__ ) snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": snake_case = yaml.load(A__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields snake_case = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**A__ ) def UpperCamelCase ( self ) -> str: return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=A__ , allow_unicode=A__ , encoding='''utf-8''' , ).decode('''utf-8''' ) _lowercase = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 
'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') args = ap.parse_args() readme_filepath = Path(args.readme_filepath) dataset_metadata = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
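The front-matter split performed by `_split_yaml_from_readme` above is simple enough to demonstrate standalone: a README whose first line is `---` has its YAML block delimited by the next `---`. The `demo_readme` content below is made up for illustration.

import yaml

demo_readme = """---
pretty_name: Demo
task_categories:
- text-classification
---
# My dataset
Some description.
"""

lines = demo_readme.splitlines()
assert lines[0] == "---"
sep_idx = lines[1:].index("---") + 1      # index of the closing delimiter
yaml_block = "\n".join(lines[1:sep_idx])  # text between the two delimiters
body = "\n".join(lines[sep_idx + 1 :])
metadata = yaml.safe_load(yaml_block)
assert metadata["task_categories"] == ["text-classification"]
assert body.startswith("# My dataset")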
44
0
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self , A__ ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): snake_case = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(A__ ) def UpperCamelCase ( self ) -> int: snake_case = '''sshleifer/tiny-gpt2''' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A__ , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = '''sgugger/tiny-distilbert-classification''' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , ) snake_case = TensorFlowBenchmark(A__ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase ( self ) -> Any: snake_case = '''sshleifer/tiny-gpt2''' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase ( self ) -> Tuple: snake_case = '''sshleifer/tiny-gpt2''' snake_case = AutoConfig.from_pretrained(A__ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=A__ , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ , [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase ( self ) -> Tuple: snake_case = '''sshleifer/tiny-gpt2''' snake_case = AutoConfig.from_pretrained(A__ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ , [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase ( self ) -> List[str]: snake_case = '''sshleifer/tiny-gpt2''' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase ( self ) -> str: snake_case = '''sshleifer/tiny-gpt2''' snake_case = 
AutoConfig.from_pretrained(A__ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ , [config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = '''patrickvonplaten/t5-tiny-random''' snake_case = AutoConfig.from_pretrained(A__ ) snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ , configs=[config] ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = '''sshleifer/tiny-gpt2''' snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=A__ , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ ) snake_case = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase ( self ) -> List[str]: snake_case = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(A__ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(A__ , '''env.csv''' ) , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ ) benchmark.run() self.assertTrue(Path(os.path.join(A__ , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(A__ , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(A__ , '''env.csv''' ) ).exists() ) def UpperCamelCase ( self ) -> List[Any]: snake_case = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(A__ ): self.assertTrue(hasattr(A__ , '''sequential''' ) ) self.assertTrue(hasattr(A__ , '''cumulative''' ) ) self.assertTrue(hasattr(A__ , '''current''' ) ) self.assertTrue(hasattr(A__ , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: snake_case = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , '''log.txt''' ) , log_print=A__ , trace_memory_line_by_line=A__ , eager_mode=A__ , multi_process=A__ , ) snake_case = TensorFlowBenchmark(A__ ) snake_case = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(A__ , '''log.txt''' ) ).exists() )
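Outside the test harness, the same benchmark can be driven directly. This sketch uses only argument names that appear in the tests above; actually running it downloads the tiny checkpoint, so treat it as illustrative.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,  # keep everything in-process for easier debugging
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)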
706
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = CodeGenTokenizer _UpperCAmelCase = CodeGenTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = {'''add_prefix_space''': True} _UpperCAmelCase = False def UpperCamelCase ( self ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] snake_case = dict(zip(A__ , range(len(A__ ) ) ) ) snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case = {'''unk_token''': '''<unk>'''} snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A__ ) ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = '''lower newer''' snake_case = '''lower newer''' return input_text, output_text def UpperCamelCase ( self ) -> List[Any]: snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case = '''lower newer''' snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) self.assertListEqual(A__ , A__ ) snake_case = tokens + [tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return snake_case = self.get_tokenizer() snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = '''lower newer''' # Testing tokenization snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids without special tokens snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids with special tokens snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = tokenizer.encode(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # Testing the unknown token snake_case = tokens + [rust_tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] 
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCamelCase ( self , A__=15 ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) # Pair input self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) def UpperCamelCase ( self ) -> Tuple: snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input looooooooong''', '''This is a simple input'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] snake_case = tokenizer.pad_token_id snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in 
out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def UpperCamelCase ( self ) -> str: snake_case = '''$$$''' snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ ) snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = tokenizer.bos_token_id snake_case = tokenizer(A__ ) snake_case = tokenizer(A__ ) self.assertEqual(out_s.input_ids[0] , A__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) snake_case = tokenizer.decode(out_s.input_ids ) snake_case = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCamelCase ( self ) -> Any: snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' snake_case = '''\nif len_a > len_b: result = a\nelse: result = b''' snake_case = tokenizer.encode(A__ ) snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: pass
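The slow test above exercises CodeGen's completion truncation: decoding stops before the first regex match, which is how generated code is cut off at comments, string delimiters, or blank lines. A direct usage sketch (requires downloading the real checkpoint):

import re
from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
completion = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
input_ids = tokenizer.encode(completion)
stop_patterns = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
# everything from the first matching pattern onward is dropped
truncated = tokenizer.decode(input_ids, truncate_before_pattern=stop_patterns)
print(truncated)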
44
0
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
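The `_LazyModule` registered above defers the heavy torch imports until an attribute is first touched. A hand-rolled miniature of the same idea using PEP 562 module-level `__getattr__`; the `_HEAVY_EXPORTS` table is illustrative, and relative imports like these only resolve inside a package.

import importlib

_HEAVY_EXPORTS = {"MraModel": ".modeling_mra", "MraConfig": ".configuration_mra"}


def __getattr__(name):
    # resolve the attribute lazily on first access, then delegate to the submodule
    if name in _HEAVY_EXPORTS:
        module = importlib.import_module(_HEAVY_EXPORTS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")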
707
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]: snake_case = parent snake_case = batch_size snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = is_training snake_case = use_labels snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = type_sequence_label_size snake_case = initializer_range snake_case = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case = (image_size // patch_size) ** 2 snake_case = num_patches + 1 def UpperCamelCase ( self ) -> int: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> int: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = TFViTModel(config=A__ ) snake_case = model(A__ , training=A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) snake_case = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]: snake_case = self.type_sequence_label_size snake_case = TFViTForImageClassification(A__ ) snake_case = model(A__ , labels=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case = 1 snake_case = TFViTForImageClassification(A__ ) snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.prepare_config_and_inputs() snake_case , snake_case , snake_case = config_and_inputs snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () _UpperCAmelCase = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase ( self ) -> List[Any]: snake_case = TFViTModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> int: pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> str: pass def UpperCamelCase ( self ) -> Union[str, Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) ) def UpperCamelCase ( self ) -> List[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def UpperCamelCase ( self ) -> Any: snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(A__ ) def __UpperCamelCase ( ) ->Any: snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ) -> Optional[int]: return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Dict: snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) snake_case = self.default_image_processor snake_case = prepare_img() snake_case = image_processor(images=A__ , 
return_tensors='''tf''' ) # forward pass snake_case = model(**A__ ) # verify the logits snake_case = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
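The sequence lengths asserted throughout the tester above follow from ViT's patching arithmetic: an image cut into P x P patches yields (H // P) * (W // P) tokens, plus one [CLS] token. A standalone check:

def vit_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token

assert vit_seq_length(224, 16) == 197  # standard ViT-Base/16 input
assert vit_seq_length(30, 2) == 226    # the tester's defaults above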
44
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } _lowercase = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def __UpperCamelCase ( a : Any ) ->List[str]: snake_case = {} with open(a , '''r''' ) as file: for line_number, line in enumerate(a ): snake_case = line.strip() if line: snake_case = line.split() snake_case = line_number snake_case = words[0] snake_case = value return result def __UpperCamelCase ( a : Tuple , a : Tuple , a : Optional[int] , a : int , a : str ) ->Any: for attribute in key.split('''.''' ): snake_case = getattr(a , a ) snake_case = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(a ): snake_case = PARAM_MAPPING[full_name.split('''.''' )[-1]] snake_case = '''param''' if weight_type is not None and weight_type != "param": snake_case = getattr(a , a ).shape elif weight_type is not None and weight_type == "param": snake_case = hf_pointer for attribute in hf_param_name.split('''.''' ): snake_case = getattr(a , a ) snake_case = shape_pointer.shape # let's reduce dimension snake_case = value[0] else: snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case = value elif weight_type == "weight_g": snake_case = value elif weight_type == "weight_v": snake_case = value elif weight_type == "bias": snake_case = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): snake_case = getattr(a , a ) snake_case = value else: snake_case = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __UpperCamelCase ( a : Optional[int] , a : Optional[Any] , a : Optional[int] , a : Any , a : int ) ->Dict: snake_case = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(a ): snake_case = PARAM_MAPPING[full_name.split('''.''' )[-1]] snake_case = '''param''' if weight_type is not None and weight_type != "param": snake_case = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case = '''.'''.join([key, hf_param_name] ) else: snake_case = key snake_case = value if '''lm_head''' in full_key else value[0] _lowercase = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def __UpperCamelCase ( a : List[Any] , a : Optional[int] , a : Optional[Any]=None , a : Any=None ) ->Any: snake_case = False for key, mapped_key in MAPPING.items(): snake_case = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: snake_case = True if "*" in mapped_key: snake_case = name.split(a )[0].split('''.''' )[-2] snake_case = mapped_key.replace('''*''' , a ) if "weight_g" in name: snake_case = '''weight_g''' elif "weight_v" in name: snake_case = '''weight_v''' elif "bias" in name: snake_case = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case = '''weight''' else: snake_case = None if hf_dict is not None: rename_dict(a , a , a , a , a ) else: set_recursively(a , a , a , a , a ) return is_used return is_used def __UpperCamelCase ( a : List[Any] , a : Union[str, Any] , a : str ) ->Any: snake_case = [] snake_case = fairseq_model.state_dict() snake_case = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case = False if "conv_layers" in name: load_conv_layer( a , a , a , a , hf_model.config.feat_extract_norm == '''group''' , ) snake_case = True else: snake_case = load_wavaveca_layer(a , a , a ) if not is_used: unused_weights.append(a ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCamelCase ( a : Union[str, Any] , a : List[str] , a : Union[str, Any] , a : Dict , a : List[str] ) ->Tuple: snake_case = full_name.split('''conv_layers.''' )[-1] snake_case = name.split('''.''' ) snake_case = int(items[0] ) snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" 
{feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(a ) @torch.no_grad() def __UpperCamelCase ( a : Tuple , a : Tuple , a : List[str]=None , a : Tuple=None , a : str=True , a : Tuple=False ) ->Dict: if config_path is not None: snake_case = WavaVecaConfig.from_pretrained(a ) else: snake_case = WavaVecaConfig() if is_seq_class: snake_case = read_txt_into_dict(a ) snake_case = idalabel snake_case = WavaVecaForSequenceClassification(a ) snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a , return_attention_mask=a , ) feature_extractor.save_pretrained(a ) elif is_finetuned: if dict_path: snake_case = Dictionary.load(a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case = target_dict.pad_index snake_case = target_dict.bos_index snake_case = target_dict.eos_index snake_case = len(target_dict.symbols ) snake_case = os.path.join(a , '''vocab.json''' ) if not os.path.isdir(a ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a ) ) return os.makedirs(a , exist_ok=a ) snake_case = target_dict.indices # fairseq has the <pad> and <s> switched snake_case = 0 snake_case = 1 with open(a , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(a , a ) snake_case = WavaVecaCTCTokenizer( a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=a , ) snake_case = True if config.feat_extract_norm == '''layer''' else False snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=a , return_attention_mask=a , ) snake_case = WavaVecaProcessor(feature_extractor=a , tokenizer=a ) processor.save_pretrained(a ) snake_case = WavaVecaForCTC(a ) else: snake_case = WavaVecaForPreTraining(a ) if is_finetuned or is_seq_class: snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: snake_case = argparse.Namespace(task='''audio_pretraining''' ) snake_case = fairseq.tasks.setup_task(a ) snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a ) snake_case = model[0].eval() recursively_load_weights(a , a , not is_finetuned ) hf_wavavec.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') 
parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) _lowercase = parser.parse_args() _lowercase = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
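The weight loaders above repeatedly walk dotted fairseq keys such as "encoder.layers.0.attention.k_proj.weight" down a module tree with getattr. A minimal standalone version of that traversal; the helper name and toy model are illustrative.

import torch.nn as nn


def resolve_dotted(obj, dotted_key: str):
    # follow each path component with getattr, exactly as set_recursively does
    for attribute in dotted_key.split("."):
        obj = getattr(obj, attribute)
    return obj


model = nn.Sequential()
model.add_module("proj", nn.Linear(2, 2))
weight = resolve_dotted(model, "proj.weight")
assert weight.shape == (2, 2)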
708
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _lowercase = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def __UpperCamelCase ( a : Dict=True ) ->str: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) ) class _lowercase ( __a ): _UpperCAmelCase = None _UpperCAmelCase = None def UpperCamelCase ( self , A__ , A__ ) -> str: with TemporaryDirectory() as tmp_dir: snake_case = dataset_module_factory(A__ , cache_dir=A__ ) snake_case = import_main_class(dataset_module.module_path , dataset=A__ ) snake_case = builder_cls( cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , ) snake_case = '''/'''.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ), config.DATASET_INFO_FILENAME, ] ) snake_case = cached_path(A__ , cache_dir=A__ ) self.assertTrue(os.path.exists(A__ ) ) @pytest.mark.integration def __UpperCamelCase ( a : List[str] ) ->Any: snake_case = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple''' snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam snake_case = None builder_instance.download_and_prepare() snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def __UpperCamelCase ( a : Any ) ->Union[str, Any]: snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path , dataset=a ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(a , a ) assert "train" in ds assert isinstance(ds['''train'''] , a ) assert next(iter(ds['''train'''] ) )
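The GCS test above locates a prepared dataset by joining three pieces with "/": the mirror base URL, the builder's relative data dir, and the dataset-info filename. A sketch using the same constants; the relative-dir string below is a hypothetical example, since the real value comes from builder_instance._relative_data_dir(with_hash=True).

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL

relative_data_dir = "wikipedia/20220301.simple/2.0.0/<hash>"  # illustrative only
url = "/".join([HF_GCP_BASE_URL, relative_data_dir, config.DATASET_INFO_FILENAME])
print(url)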
44
0
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]: snake_case = original_name.split('''.''' )[0] snake_case = key.split('''.''' ) snake_case = int(key_list[key_list.index(a ) - 2] ) snake_case = int(key_list[key_list.index(a ) - 1] ) snake_case = orig_block_num - offset snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def __UpperCamelCase ( a : Tuple ) ->Dict: snake_case = OrderedDict() snake_case , snake_case = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): snake_case = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 snake_case = key[: key.find('''proj''' )] snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" ) snake_case = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: snake_case = '''poolformer.encoder.''' + key if "mlp.fc1" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' ) if "norm2" in key: snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: snake_case = key.replace('''head''' , '''classifier''' ) snake_case = value return new_state_dict def __UpperCamelCase ( ) ->Optional[int]: snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case = Image.open(requests.get(a , stream=a ).raw ) return image @torch.no_grad() def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]: snake_case = PoolFormerConfig() # set attributes based on model_name snake_case = '''huggingface/label-files''' snake_case = model_name[-3:] snake_case = 1000 snake_case = '''imagenet-1k-id2label.json''' snake_case = (1, 1000) # set config attributes snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} if size == "s12": snake_case = [2, 2, 6, 2] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s24": snake_case = [4, 4, 12, 4] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s36": snake_case = [6, 6, 18, 6] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.9 elif size == "m36": snake_case = [6, 6, 18, 6] snake_case = [96, 192, 384, 768] snake_case = 4.0 
snake_case = 1e-6 snake_case = 0.95 elif size == "m48": snake_case = [8, 8, 24, 8] snake_case = [96, 192, 384, 768] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.95 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) # Prepare image snake_case = prepare_img() snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict snake_case = torch.load(a , map_location=torch.device('''cpu''' ) ) # rename keys snake_case = rename_keys(a ) # create HuggingFace model and load state dict snake_case = PoolFormerForImageClassification(a ) model.load_state_dict(a ) model.eval() # Define image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass snake_case = model(a ) snake_case = outputs.logits # define expected logit slices for different models if size == "s12": snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": snake_case = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": snake_case = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": snake_case = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a , atol=1e-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) _lowercase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
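A worked example of the block renumbering performed by replace_key_with_offset above, reimplemented standalone so the string surgery is visible; the sample key and offset are illustrative.

def demo_replace(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    parts = key.split(".")
    # the block and layer indices sit two and one positions before `to_find`
    orig_block = int(parts[parts.index(to_find) - 2])
    layer = int(parts[parts.index(to_find) - 1])
    return key.replace(
        f"{orig_block}.{layer}.{original_name}",
        f"block.{orig_block - offset}.{layer}.{new_name}",
    )


assert (
    demo_replace("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "poolformer.encoder.block.1.0.output.conv1.weight"
)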
709
'''simple docstring'''


def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
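A quick property check for the two implementations above: both agree with math.gcd, including when one argument is zero.

import math

for x, y in [(3, 5), (12, 18), (0, 7), (270, 192)]:
    assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)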
44
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( __a ): _UpperCAmelCase = '''bert''' def __init__( self , A__=3_05_22 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=2 , A__=0.0_2 , A__=1e-12 , A__=0 , A__="absolute" , A__=True , A__=None , **A__ , ) -> List[Any]: super().__init__(pad_token_id=A__ , **A__ ) snake_case = vocab_size snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = hidden_act snake_case = intermediate_size snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = 
max_position_embeddings snake_case = type_vocab_size snake_case = initializer_range snake_case = layer_norm_eps snake_case = position_embedding_type snake_case = use_cache snake_case = classifier_dropout class _lowercase ( __a ): @property def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": snake_case = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: snake_case = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
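For reference, every constructor argument of the config above has a default, so a small valid configuration can be built directly; a brief usage sketch:

from transformers import BertConfig

config = BertConfig(
    vocab_size=128,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
)
assert config.model_type == "bert"
assert config.hidden_size == 32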
710
'''simple docstring'''
import argparse
import copy


def generate_neighbours(path):
    # Build an adjacency map: node -> list of [neighbour, distance] pairs.
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )
    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    # Greedy nearest-neighbour tour, starting from the node named by the
    # first character of the data file.
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    # Enumerate all 2-swap neighbours of the tour. Each candidate carries its
    # total distance as the trailing element, and the list is sorted by it.
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
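For a quick sanity check, a hypothetical run of the script above (the data file contents, node names, and script filename are illustrative, not from the source). Each data line is whitespace-separated as `node_a node_b distance`, and the start node is read from the file's first character:

# tsp_data.txt (hypothetical contents):
#   a b 20
#   a c 18
#   b c 10
#   b d 22
#   c d 12
python tabu_search.py -f tsp_data.txt -i 100 -s 5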
44
0
'''simple docstring''' from manim import * class _lowercase ( __a ): def UpperCamelCase ( self ) -> str: snake_case = Rectangle(height=0.5 , width=0.5 ) snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) snake_case = [mem.copy() for i in range(6 )] snake_case = [mem.copy() for i in range(6 )] snake_case = VGroup(*A__ ).arrange(A__ , buff=0 ) snake_case = VGroup(*A__ ).arrange(A__ , buff=0 ) snake_case = VGroup(A__ , A__ ).arrange(A__ , buff=0 ) snake_case = Text('''CPU''' , font_size=24 ) snake_case = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(A__ ) snake_case = [mem.copy() for i in range(1 )] snake_case = VGroup(*A__ ).arrange(A__ , buff=0 ) snake_case = Text('''GPU''' , font_size=24 ) snake_case = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ ) gpu.align_to(A__ , A__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(A__ ) snake_case = [mem.copy() for i in range(6 )] snake_case = VGroup(*A__ ).arrange(A__ , buff=0 ) snake_case = Text('''Model''' , font_size=24 ) snake_case = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ ) model.move_to([3, -1.0, 0] ) self.play( Create(A__ , run_time=1 ) , Create(A__ , run_time=1 ) , Create(A__ , run_time=1 ) , ) snake_case = MarkupText( F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , ) snake_case = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) snake_case = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(A__ , run_time=2.5 ) , Write(A__ ) , Write(A__ ) ) self.add(A__ ) snake_case = [] snake_case = [] snake_case = [] for i, rect in enumerate(A__ ): snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(A__ , opacity=0.7 ) cpu_target.move_to(A__ ) cpu_target.generate_target() snake_case = 0.4_6 / 4 snake_case = 0.4_6 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=A__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=A__ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=A__ , buff=0.0 ) cpu_targs.append(A__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(A__ ) ) second_animations.append(MoveToTarget(A__ , run_time=1.5 ) ) self.play(*A__ ) self.play(*A__ ) self.wait()
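Assuming the scene above were saved under a readable module and class name (both placeholders below, not from the source), it could be rendered with the stock Manim CLI:

manim -pql model_viz.py ModelLoadingScene  # -p previews the result, -ql renders at low quality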
711
'''simple docstring''' from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
44
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class _lowercase : def __init__( self , A__ , A__=2 , A__=True , A__=False , A__=10 , A__=3 , A__=32 * 8 , A__=32 * 8 , A__=4 , A__=64 , ) -> Union[str, Any]: snake_case = parent snake_case = batch_size snake_case = is_training snake_case = use_auxiliary_loss snake_case = num_queries snake_case = num_channels snake_case = min_size snake_case = max_size snake_case = num_labels snake_case = hidden_dim snake_case = hidden_dim def UpperCamelCase ( self ) -> Optional[int]: snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( A__ ) snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=A__ ) snake_case = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=A__ ) > 0.5 ).float() snake_case = (torch.rand((self.batch_size, self.num_labels) , device=A__ ) > 0.5).long() snake_case = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase ( self ) -> Optional[Any]: snake_case = MaskaFormerConfig( hidden_size=self.hidden_dim , ) snake_case = self.num_queries snake_case = self.num_labels snake_case = [1, 1, 1, 1] snake_case = self.num_channels snake_case = 64 snake_case = 1_28 snake_case = self.hidden_dim snake_case = self.hidden_dim snake_case = self.hidden_dim return config def UpperCamelCase ( self ) -> Dict: snake_case , snake_case , snake_case , snake_case , snake_case = self.prepare_config_and_inputs() snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def UpperCamelCase ( self , A__ , A__ ) -> Union[str, Any]: snake_case = output.encoder_hidden_states snake_case = output.pixel_decoder_hidden_states snake_case = output.transformer_decoder_hidden_states self.parent.assertTrue(len(A__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(A__ ) , config.decoder_layers ) def UpperCamelCase ( self , A__ , A__ , A__ , A__=False ) -> str: with torch.no_grad(): snake_case = MaskaFormerModel(config=A__ ) model.to(A__ ) model.eval() snake_case = model(pixel_values=A__ , pixel_mask=A__ ) snake_case = model(A__ , output_hidden_states=A__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(A__ , A__ ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> List[str]: snake_case = MaskaFormerForUniversalSegmentation(config=A__ ) model.to(A__ ) 
model.eval() def comm_check_on_output(A__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): snake_case = model(pixel_values=A__ , pixel_mask=A__ ) snake_case = model(A__ ) comm_check_on_output(A__ ) snake_case = model( pixel_values=A__ , pixel_mask=A__ , mask_labels=A__ , class_labels=A__ ) comm_check_on_output(A__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () _UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase ( self ) -> str: snake_case = MaskaFormerModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ ) def UpperCamelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def UpperCamelCase ( self ) -> str: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A__ , **A__ , output_hidden_states=A__ ) def UpperCamelCase ( self ) -> str: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A__ ) @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> Dict: pass @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' ) def UpperCamelCase ( self ) -> List[Any]: pass @unittest.skip(reason='''Mask2Former is not a generative model''' ) def UpperCamelCase ( self ) -> Optional[Any]: pass @unittest.skip(reason='''Mask2Former does not use token embeddings''' ) def UpperCamelCase ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def UpperCamelCase ( self ) -> List[Any]: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase ( self ) -> str: pass def UpperCamelCase ( self ) -> str: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) @slow def UpperCamelCase ( self ) -> List[str]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: snake_case = MaskaFormerModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) def 
UpperCamelCase ( self ) -> Dict: snake_case = (self.model_tester.min_size,) * 2 snake_case = { '''pixel_values''': torch.randn((2, 3, *size) , device=A__ ), '''mask_labels''': torch.randn((2, 10, *size) , device=A__ ), '''class_labels''': torch.zeros(2 , 10 , device=A__ ).long(), } snake_case = self.model_tester.get_config() snake_case = MaskaFormerForUniversalSegmentation(A__ ).to(A__ ) snake_case = model(**A__ ) self.assertTrue(outputs.loss is not None ) def UpperCamelCase ( self ) -> Optional[int]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(A__ , **A__ , output_hidden_states=A__ ) def UpperCamelCase ( self ) -> List[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ).to(A__ ) snake_case = model(**A__ , output_attentions=A__ ) self.assertTrue(outputs.attentions is not None ) def UpperCamelCase ( self ) -> int: if not self.model_tester.is_training: return snake_case = self.all_model_classes[1] snake_case , snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs() snake_case = model_class(A__ ) model.to(A__ ) model.train() snake_case = model(A__ , mask_labels=A__ , class_labels=A__ ).loss loss.backward() def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.all_model_classes[1] snake_case , snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs() snake_case = True snake_case = True snake_case = model_class(A__ ).to(A__ ) model.train() snake_case = model(A__ , mask_labels=A__ , class_labels=A__ ) snake_case = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() snake_case = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() snake_case = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() snake_case = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=A__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowercase = 1E-4 def __UpperCamelCase ( ) ->Union[str, Any]: snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class _lowercase ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ) -> Tuple: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase ( self ) -> str: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def UpperCamelCase ( self ) -> Optional[int]: snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A__ ) snake_case = self.default_image_processor snake_case = prepare_img() snake_case = image_processor(A__ , return_tensors='''pt''' ).to(A__ ) snake_case = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A__ , (1, 3, 3_84, 3_84) ) with torch.no_grad(): snake_case = model(**A__ ) snake_case = torch.tensor( [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(A__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, 
:3, :3] , A__ , atol=A__ ) ) snake_case = torch.tensor( [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(A__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , A__ , atol=A__ ) ) snake_case = torch.tensor( [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(A__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , A__ , atol=A__ ) ) def UpperCamelCase ( self ) -> str: snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A__ ).eval() snake_case = self.default_image_processor snake_case = prepare_img() snake_case = image_processor(A__ , return_tensors='''pt''' ).to(A__ ) snake_case = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(A__ , (1, 3, 3_84, 3_84) ) with torch.no_grad(): snake_case = model(**A__ ) # masks_queries_logits snake_case = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) snake_case = [ [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1], [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1], [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5], ] snake_case = torch.tensor(A__ ).to(A__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A__ , atol=A__ ) ) # class_queries_logits snake_case = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) snake_case = torch.tensor( [ [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2], [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3], [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5], ] ).to(A__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A__ , atol=A__ ) ) def UpperCamelCase ( self ) -> List[str]: snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A__ ).eval() snake_case = self.default_image_processor snake_case = image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) snake_case = inputs['''pixel_values'''].to(A__ ) snake_case = [el.to(A__ ) for el in inputs['''mask_labels''']] snake_case = [el.to(A__ ) for el in inputs['''class_labels''']] with torch.no_grad(): snake_case = model(**A__ ) self.assertTrue(outputs.loss is not None )
712
'''simple docstring''' from ...processing_utils import ProcessorMixin class _lowercase ( __a ): _UpperCAmelCase = '''WhisperFeatureExtractor''' _UpperCAmelCase = '''WhisperTokenizer''' def __init__( self , A__ , A__ ) -> Optional[Any]: super().__init__(A__ , A__ ) snake_case = self.feature_extractor snake_case = False def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]: return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ ) def __call__( self , *A__ , **A__ ) -> Dict: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A__ , **A__ ) snake_case = kwargs.pop('''audio''' , A__ ) snake_case = kwargs.pop('''sampling_rate''' , A__ ) snake_case = kwargs.pop('''text''' , A__ ) if len(A__ ) > 0: snake_case = args[0] snake_case = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ ) if text is not None: snake_case = self.tokenizer(A__ , **A__ ) if text is None: return inputs elif audio is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> str: return self.tokenizer.decode(*A__ , **A__ ) def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]: return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
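This mirrors transformers' `WhisperProcessor`; a usage sketch under that assumption (the checkpoint name and `audio_array` are illustrative):

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# raw float waveform sampled at 16 kHz -> log-mel "input_features"
inputs = processor(audio=audio_array, sampling_rate=16000, return_tensors="pt")
# tokenize transcription targets separately
labels = processor(text="hello world", return_tensors="pt").input_ids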
44
0
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = IFInpaintingSuperResolutionPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase ( self ) -> int: return self._get_superresolution_dummy_components() def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]: if str(A__ ).startswith('''mps''' ): snake_case = torch.manual_seed(A__ ) else: snake_case = torch.Generator(device=A__ ).manual_seed(A__ ) snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCamelCase ( self ) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCamelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_local() def UpperCamelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
713
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _lowercase ( __a ): _UpperCAmelCase = '''char''' _UpperCAmelCase = '''bpe''' _UpperCAmelCase = '''wp''' _lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _lowercase ( __a ): _UpperCAmelCase = ['''image_processor''', '''char_tokenizer'''] _UpperCAmelCase = '''ViTImageProcessor''' _UpperCAmelCase = '''MgpstrTokenizer''' def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]: snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A__ , ) snake_case = kwargs.pop('''feature_extractor''' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) snake_case = tokenizer snake_case = AutoTokenizer.from_pretrained('''gpt2''' ) snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(A__ , A__ ) def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None: snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ ) if text is None: return inputs elif images is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , A__ ) -> Dict: snake_case , snake_case , snake_case = sequences snake_case = char_preds.size(0 ) snake_case , snake_case = self._decode_helper(A__ , '''char''' ) snake_case , snake_case = self._decode_helper(A__ , '''bpe''' ) snake_case , snake_case = self._decode_helper(A__ , '''wp''' ) snake_case = [] snake_case = [] for i in range(A__ ): snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] snake_case = scores.index(max(A__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) snake_case = {} snake_case = final_strs snake_case = final_scores snake_case = char_strs snake_case = bpe_strs snake_case = wp_strs return out def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]: if format == DecodeType.CHARACTER: snake_case = self.char_decode snake_case = 1 snake_case = '''[s]''' elif format == DecodeType.BPE: snake_case = self.bpe_decode snake_case = 2 snake_case = '''#''' elif format == DecodeType.WORDPIECE: snake_case = self.wp_decode snake_case = 1_02 snake_case = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) snake_case , snake_case = [], [] snake_case = pred_logits.size(0 ) snake_case = pred_logits.size(1 ) snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ ) snake_case = preds_index.view(-1 , A__ )[:, 1:] snake_case = decoder(A__ ) snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 ) snake_case = preds_max_prob[:, 1:] for index in range(A__ ): snake_case = preds_str[index].find(A__ ) snake_case = preds_str[index][:pred_eos] 
snake_case = preds_index[index].cpu().tolist() snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1 snake_case = preds_max_prob[index][: pred_eos_index + 1] snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A__ ) conf_scores.append(A__ ) return dec_strs, conf_scores def UpperCamelCase ( self , A__ ) -> int: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )] return decode_strs def UpperCamelCase ( self , A__ ) -> List[str]: return self.bpe_tokenizer.batch_decode(A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )] return decode_strs
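The class above tracks transformers' `MgpstrProcessor`; a hedged usage sketch (the checkpoint name and `image` are illustrative):

from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)
# batch_decode keeps, per sample, the best of the char/BPE/wordpiece heads
text = processor.batch_decode(outputs.logits)["generated_text"]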
44
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def __UpperCamelCase ( a : List[str] ) ->Tuple: snake_case = SwinvaConfig() snake_case = swinva_name.split('''_''' ) snake_case = name_split[1] if "to" in name_split[3]: snake_case = int(name_split[3][-3:] ) else: snake_case = int(name_split[3] ) if "to" in name_split[2]: snake_case = int(name_split[2][-2:] ) else: snake_case = int(name_split[2][6:] ) if model_size == "tiny": snake_case = 96 snake_case = (2, 2, 6, 2) snake_case = (3, 6, 12, 24) elif model_size == "small": snake_case = 96 snake_case = (2, 2, 18, 2) snake_case = (3, 6, 12, 24) elif model_size == "base": snake_case = 128 snake_case = (2, 2, 18, 2) snake_case = (4, 8, 16, 32) else: snake_case = 192 snake_case = (2, 2, 18, 2) snake_case = (6, 12, 24, 48) if "to" in swinva_name: snake_case = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): snake_case = 2_1841 snake_case = '''huggingface/label-files''' snake_case = '''imagenet-22k-id2label.json''' snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} else: snake_case = 1000 snake_case = '''huggingface/label-files''' snake_case = '''imagenet-1k-id2label.json''' snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = img_size snake_case = num_classes snake_case = embed_dim snake_case = depths snake_case = num_heads snake_case = window_size return config def __UpperCamelCase ( a : Dict ) ->int: if "patch_embed.proj" in name: snake_case = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: snake_case = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: snake_case = '''encoder.''' + name if "attn.proj" in name: snake_case = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: snake_case = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: snake_case = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: snake_case = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: snake_case = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: snake_case = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: snake_case = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: snake_case = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: snake_case = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: snake_case = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if name == "norm.weight": snake_case = '''layernorm.weight''' if name == "norm.bias": snake_case = '''layernorm.bias''' if "head" in name: snake_case = name.replace('''head''' , '''classifier''' ) else: snake_case = '''swinv2.''' + name return name def __UpperCamelCase ( a : Any , a : Union[str, Any] ) ->Dict: for key in orig_state_dict.copy().keys(): snake_case = orig_state_dict.pop(a ) if "mask" in key: continue elif "qkv" in key: 
snake_case = key.split('''.''' ) snake_case = int(key_split[1] ) snake_case = int(key_split[3] ) snake_case = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: snake_case = val[:dim, :] snake_case = val[dim : dim * 2, :] snake_case = val[-dim:, :] else: snake_case = val[:dim] snake_case = val[ dim : dim * 2 ] snake_case = val[-dim:] else: snake_case = val return orig_state_dict def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] ) ->str: snake_case = timm.create_model(a , pretrained=a ) timm_model.eval() snake_case = get_swinva_config(a ) snake_case = SwinvaForImageClassification(a ) model.eval() snake_case = convert_state_dict(timm_model.state_dict() , a ) model.load_state_dict(a ) snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) ) snake_case = Image.open(requests.get(a , stream=a ).raw ) snake_case = image_processor(images=a , return_tensors='''pt''' ) snake_case = timm_model(inputs['''pixel_values'''] ) snake_case = model(**a ).logits assert torch.allclose(a , a , atol=1e-3 ) print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) model.push_to_hub( repo_path_or_name=Path(a , a ) , organization='''nandwalritik''' , commit_message='''Add model''' , ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swinv2_name', default='swinv2_tiny_patch4_window8_256', type=str, help='Name of the Swinv2 timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _lowercase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
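An invocation sketch for the conversion script (the filename is a placeholder; the flags and the default model name come from the argparse definition above):

python convert_swinv2_timm_to_pytorch.py \
    --swinv2_name swinv2_tiny_patch4_window8_256 \
    --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256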
714
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowercase , _lowercase , _lowercase = False, False, False @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = "dict" _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a ) def __call__( self ) -> Optional[Any]: return self.pa_type def UpperCamelCase ( self , A__ ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(A__ , A__ ): return {"bytes": None, "path": value} elif isinstance(A__ , A__ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes snake_case = BytesIO() sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 snake_case = BytesIO(bytes() ) sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def UpperCamelCase ( self , A__ , A__ = None ) -> dict: if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: snake_case = token_per_repo_id or {} snake_case = path.split('''::''' )[-1] try: snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id'''] snake_case = token_per_repo_id[repo_id] except (ValueError, KeyError): snake_case = None with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f: snake_case , snake_case = sf.read(A__ ) else: snake_case , snake_case = sf.read(A__ ) snake_case = array.T if self.mono: snake_case = librosa.to_mono(A__ ) if self.sampling_rate and self.sampling_rate != sampling_rate: snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate ) snake_case = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def UpperCamelCase ( self , A__ ) -> pa.StructArray: if pa.types.is_string(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: snake_case = storage.field('''bytes''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: snake_case = storage.field('''path''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(A__ , self.pa_type ) def UpperCamelCase ( self , A__ ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A__ ): with xopen(A__ , '''rb''' ) as f: snake_case = f.read() return bytes_ snake_case = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else 
x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) snake_case = pa.array( [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(A__ , self.pa_type )
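In practice this feature is attached to a dataset column through the public `datasets` API; a minimal sketch (the file path is illustrative):

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/sample.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # decoding yields {"path", "array", "sampling_rate"}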
44
0
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf _lowercase = logging.get_logger(__name__) @dataclass class _lowercase ( __a ): _UpperCAmelCase = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **A__ ) -> Optional[int]: '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: snake_case = deprecated_arg[3:] snake_case = not kwargs.pop(A__ ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) snake_case = kwargs.pop('''tpu_name''' , self.tpu_name ) snake_case = kwargs.pop('''device_idx''' , self.device_idx ) snake_case = kwargs.pop('''eager_mode''' , self.eager_mode ) snake_case = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**A__ ) _UpperCAmelCase = field( default=__a , metadata={'''help''': '''Name of TPU'''} , ) _UpperCAmelCase = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) _UpperCAmelCase = field(default=__a , metadata={'''help''': '''Benchmark models in eager model.'''} ) _UpperCAmelCase = field( default=__a , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def UpperCamelCase ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ['''tf'''] ) snake_case = None if self.tpu: try: if self.tpu_name: snake_case = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: snake_case = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: snake_case = None return tpu @cached_property def UpperCamelCase ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: '''simple docstring''' requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) snake_case = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) snake_case = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU snake_case = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def UpperCamelCase ( self ) -> bool: '''simple docstring''' requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def UpperCamelCase ( self ) -> "tf.distribute.Strategy": '''simple docstring''' requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def UpperCamelCase ( self ) -> int: '''simple docstring''' requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCamelCase ( self ) -> bool: '''simple docstring''' return self.n_gpu > 0
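These arguments pair with transformers' TensorFlow benchmark runner; a hedged sketch of typical use (model name, batch sizes, and sequence lengths are examples):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
results = TensorFlowBenchmark(args).run()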
715
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _lowercase : @staticmethod def UpperCamelCase ( *A__ , **A__ ) -> List[Any]: pass def __UpperCamelCase ( a : Image ) ->str: snake_case = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _lowercase ( unittest.TestCase ): _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCamelCase ( self , A__ , A__ ) -> List[Any]: snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ ) import datasets snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , A__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def UpperCamelCase ( self ) -> Optional[Any]: pass @slow @require_torch def UpperCamelCase ( self ) -> Dict: snake_case = '''Intel/dpt-large''' snake_case = pipeline('''depth-estimation''' , model=A__ ) snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) snake_case = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 ) @require_torch def UpperCamelCase ( self ) -> Any: # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
44
0
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = LEDTokenizer _UpperCAmelCase = LEDTokenizerFast _UpperCAmelCase = True def UpperCamelCase ( self ) -> int: super().setUp() snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] snake_case = dict(zip(A__ , range(len(A__ ) ) ) ) snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case = {'''unk_token''': '''<unk>'''} snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A__ ) ) def UpperCamelCase ( self , **A__ ) -> int: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , A__ ) -> Dict: return "lower newer", "lower newer" @cached_property def UpperCamelCase ( self ) -> str: return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def UpperCamelCase ( self ) -> Optional[int]: return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def UpperCamelCase ( self ) -> str: snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] snake_case = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: snake_case = tokenizer(A__ , max_length=len(A__ ) , padding=A__ , return_tensors='''pt''' ) self.assertIsInstance(A__ , A__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) snake_case = batch.input_ids.tolist()[0] self.assertListEqual(A__ , A__ ) @require_torch def UpperCamelCase ( self ) -> Any: snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: snake_case = tokenizer(A__ , padding=A__ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , A__ ) self.assertIn('''attention_mask''' , A__ ) self.assertNotIn('''labels''' , A__ ) self.assertNotIn('''decoder_attention_mask''' , A__ ) @require_torch def UpperCamelCase ( self ) -> Optional[Any]: snake_case = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: snake_case = tokenizer(text_target=A__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def UpperCamelCase ( self ) -> 
List[str]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: snake_case = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=A__ , truncation=A__ , return_tensors='''pt''' ) self.assertIsInstance(A__ , A__ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = ['''A long paragraph for summarization.'''] snake_case = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: snake_case = tokenizer(A__ , return_tensors='''pt''' ) snake_case = tokenizer(text_target=A__ , return_tensors='''pt''' ) snake_case = inputs['''input_ids'''] snake_case = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def UpperCamelCase ( self ) -> int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: snake_case = ['''Summary of the text.''', '''Another summary.'''] snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] snake_case = tokenizer(A__ , padding=A__ ) snake_case = [[0] * len(A__ ) for x in encoded_output['''input_ids''']] snake_case = tokenizer.pad(A__ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , A__ ) def UpperCamelCase ( self ) -> Tuple: pass def UpperCamelCase ( self ) -> Dict: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) snake_case = self.tokenizer_class.from_pretrained(A__ , **A__ ) snake_case = '''A, <mask> AllenNLP sentence.''' snake_case = tokenizer_r.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ ) snake_case = tokenizer_p.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( A__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( A__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
716
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __UpperCamelCase ( a : Optional[int] ) ->Dict: snake_case = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Optional[Any] ) ->int: snake_case = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case = s_dict.pop(a ) elif "subsample" in key: snake_case = s_dict.pop(a ) def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]: snake_case , snake_case = emb.weight.shape snake_case = nn.Linear(a , a , bias=a ) snake_case = emb.weight.data return lin_layer def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple: snake_case = torch.load(a , map_location='''cpu''' ) snake_case = mam_aaa['''args'''] snake_case = mam_aaa['''model'''] snake_case = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(a ) rename_keys(a ) snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0] snake_case = args.share_decoder_input_output_embed snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )] snake_case = SpeechaTextConfig( vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , ) snake_case = SpeechaTextForConditionalGeneration(a ) snake_case , snake_case = model.model.load_state_dict(a , strict=a ) if len(a ) > 0 and not set(a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f""" but all the following weights are missing {missing}""" ) if tie_embeds: snake_case = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case = lm_head_weights model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
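An invocation sketch (the script filename is a placeholder; both flags are defined in the argparse section above):

python convert_s2t_fairseq_to_tfms.py \
    --fairseq_path ./checkpoint_best.pt \
    --pytorch_dump_folder_path ./s2t-converted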
44
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : Optional[Any] ) ->List[List[ImageInput]]: if isinstance(a , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(a , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(a ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class _lowercase ( __a ): _UpperCAmelCase = ['''pixel_values'''] def __init__( self , A__ = True , A__ = None , A__ = PILImageResampling.BILINEAR , A__ = True , A__ = None , A__ = True , A__ = 1 / 2_55 , A__ = True , A__ = True , A__ = None , A__ = None , **A__ , ) -> None: super().__init__(**A__ ) snake_case = size if size is not None else {'''shortest_edge''': 2_56} snake_case = get_size_dict(A__ , default_to_square=A__ ) snake_case = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} snake_case = get_size_dict(A__ , param_name='''crop_size''' ) snake_case = do_resize snake_case = size snake_case = do_center_crop snake_case = crop_size snake_case = resample snake_case = do_rescale snake_case = rescale_factor snake_case = offset snake_case = do_normalize snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase ( self , A__ , A__ , A__ = PILImageResampling.BILINEAR , A__ = None , **A__ , ) -> np.ndarray: snake_case = get_size_dict(A__ , default_to_square=A__ ) if "shortest_edge" in size: snake_case = get_resize_output_image_size(A__ , size['''shortest_edge'''] , default_to_square=A__ ) elif "height" in size and "width" in size: snake_case = (size['''height'''], size['''width''']) else: raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ ) def UpperCamelCase ( self , A__ , A__ , A__ = None , **A__ , ) -> np.ndarray: snake_case = get_size_dict(A__ ) if "height" not in size or "width" not in size: raise ValueError(F"""Size must have 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(A__ , size=(size['''height'''], size['''width''']) , data_format=A__ , **A__ ) def UpperCamelCase ( self , A__ , A__ , A__ = True , A__ = None , **A__ , ) -> List[Any]: snake_case = image.astype(np.floataa ) if offset: snake_case = image - (scale / 2) return rescale(A__ , scale=A__ , data_format=A__ , **A__ ) def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , **A__ , ) -> np.ndarray: return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ ) def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. snake_case = to_numpy_array(A__ ) if do_resize: snake_case = self.resize(image=A__ , size=A__ , resample=A__ ) if do_center_crop: snake_case = self.center_crop(A__ , size=A__ ) if do_rescale: snake_case = self.rescale(image=A__ , scale=A__ , offset=A__ ) if do_normalize: snake_case = self.normalize(image=A__ , mean=A__ , std=A__ ) snake_case = to_channel_dimension_format(A__ , A__ ) return image def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ) -> PIL.Image.Image: snake_case = do_resize if do_resize is not None else self.do_resize snake_case = resample if resample is not None else self.resample snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case = do_rescale if do_rescale is not None else self.do_rescale snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case = offset if offset is not None else self.offset snake_case = do_normalize if do_normalize is not None else self.do_normalize snake_case = image_mean if image_mean is not None else self.image_mean snake_case = image_std if image_std is not None else self.image_std snake_case = size if size is not None else self.size snake_case = get_size_dict(A__ , default_to_square=A__ ) snake_case = crop_size if crop_size is not None else self.crop_size snake_case = get_size_dict(A__ , param_name='''crop_size''' ) if not valid_images(A__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) snake_case = make_batched(A__ ) snake_case = [ [ self._preprocess_image( image=A__ , do_resize=A__ , size=A__ , resample=A__ , do_center_crop=A__ , crop_size=A__ , do_rescale=A__ , rescale_factor=A__ , offset=A__ , do_normalize=A__ , image_mean=A__ , image_std=A__ , data_format=A__ , ) for img in video ] for video in videos ] snake_case = {'''pixel_values''': videos} return BatchFeature(data=A__ , tensor_type=A__ )
717
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowercase ( metaclass=__a ): _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq'''] def __init__( self , *A__ , **A__ ) -> Union[str, Any]: requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase ( cls , *A__ , **A__ ) -> Optional[Any]: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase ( cls , *A__ , **A__ ) -> Any: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
44
0
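The video processor above first normalizes its input into a batch of frame lists before preprocessing. A small sketch of that batching rule, assuming numpy-array frames:

import numpy as np

def make_batched_sketch(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos       # already a batch of videos
    if isinstance(videos, (list, tuple)):
        return [videos]     # a single video given as a list of frames
    return [[videos]]       # a lone frame becomes a one-frame video

frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(make_batched_sketch(frame)) == 1
assert len(make_batched_sketch([frame, frame])[0]) == 2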
'''simple docstring''' def __UpperCamelCase ( a : int = 1000 ) ->int: snake_case = 2**power snake_case = str(a ) snake_case = list(a ) snake_case = 0 for i in list_num: sum_of_num += int(a ) return sum_of_num if __name__ == "__main__": _lowercase = int(input('Enter the power of 2: ').strip()) print('2 ^ ', power, ' = ', 2**power) _lowercase = solution(power) print('Sum of the digits is: ', result)
718
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator class _lowercase : def __init__( self , A__ ) -> None: snake_case = value snake_case = None snake_case = None class _lowercase : def __init__( self , A__ ) -> None: snake_case = tree def UpperCamelCase ( self , A__ ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
44
0
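The digit-sum routine above is the classic Project Euler problem 16 computation; a quick check with a small power:

# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26
assert sum(int(digit) for digit in str(2**15)) == 26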
'''simple docstring''' from __future__ import annotations from collections import namedtuple def __UpperCamelCase ( a : float , a : float , a : float ) ->tuple: snake_case = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
719
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) _lowercase = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] _lowercase = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def __UpperCamelCase ( a : List[str] ) ->Optional[int]: snake_case = torch.load(a , map_location='''cpu''' ) return sd def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] , a : int=rename_keys_prefix ) ->Tuple: snake_case = OrderedDict() snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue snake_case = key for name_pair in rename_keys_prefix: snake_case = new_key.replace(name_pair[0] , name_pair[1] ) snake_case = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately snake_case = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: snake_case = '''pretraining''' if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} elif "nlvr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 512} snake_case = '''multichoice''' elif "vqa_advanced" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048} snake_case = '''vqa_advanced''' elif "vqa" in checkpoint_path: snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129} snake_case = '''vqa''' elif "nlvr" in checkpoint_path: snake_case = { '''visual_embedding_dim''': 1024, '''num_labels''': 2, } snake_case = '''nlvr''' snake_case = VisualBertConfig(**a ) # Load State Dict snake_case = load_state_dict(a ) snake_case = get_new_dict(a , a ) if model_type == "pretraining": snake_case = VisualBertForPreTraining(a ) elif model_type == "vqa": snake_case = VisualBertForQuestionAnswering(a ) elif model_type == "nlvr": snake_case = VisualBertForVisualReasoning(a ) elif model_type == "multichoice": snake_case = VisualBertForMultipleChoice(a ) model.load_state_dict(a ) # Save Checkpoints Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') 
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
44
0
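The function above solves P = V * I for whichever quantity is passed as zero. A worked instance of the relation it encodes:

power, current = 10.0, 5.0
voltage = power / current                 # 10 W drawn at 5 A implies 2 V
assert voltage == 2.0 and voltage * current == power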
'''simple docstring''' from typing import Any class _lowercase : def __init__( self , A__ ) -> List[str]: snake_case = data snake_case = None def __repr__( self ) -> str: return F"""Node({self.data})""" class _lowercase : def __init__( self ) -> Optional[int]: snake_case = None def __iter__( self ) -> Any: snake_case = self.head while node: yield node.data snake_case = node.next def __len__( self ) -> int: return sum(1 for _ in self ) def __repr__( self ) -> str: return "->".join([str(A__ ) for item in self] ) def __getitem__( self , A__ ) -> Any: if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , A__ , A__ ) -> None: if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) snake_case = self.head for _ in range(A__ ): snake_case = current.next snake_case = data def UpperCamelCase ( self , A__ ) -> None: self.insert_nth(len(self ) , A__ ) def UpperCamelCase ( self , A__ ) -> None: self.insert_nth(0 , A__ ) def UpperCamelCase ( self , A__ , A__ ) -> None: if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) snake_case = Node(A__ ) if self.head is None: snake_case = new_node elif index == 0: snake_case = self.head # link new_node to head snake_case = new_node else: snake_case = self.head for _ in range(index - 1 ): snake_case = temp.next snake_case = temp.next snake_case = new_node def UpperCamelCase ( self ) -> None: # print every node data print(self ) def UpperCamelCase ( self ) -> Any: return self.delete_nth(0 ) def UpperCamelCase ( self ) -> Any: # delete from tail return self.delete_nth(len(self ) - 1 ) def UpperCamelCase ( self , A__ = 0 ) -> Any: if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) snake_case = self.head # default first node if index == 0: snake_case = self.head.next else: snake_case = self.head for _ in range(index - 1 ): snake_case = temp.next snake_case = temp.next snake_case = temp.next.next return delete_node.data def UpperCamelCase ( self ) -> bool: return self.head is None def UpperCamelCase ( self ) -> None: snake_case = None snake_case = self.head while current: # Store the current node's next node. snake_case = current.next # Make the current node's next point backwards snake_case = prev # Make the previous node be the current node snake_case = current # Make the current node the next node (to progress iteration) snake_case = next_node # Return prev in order to put the head at the end snake_case = prev def __UpperCamelCase ( ) ->None: snake_case = LinkedList() assert linked_list.is_empty() is True assert str(a ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(a ) == i linked_list.insert_nth(a , i + 1 ) assert str(a ) == "->".join(str(a ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(a ) == "->".join(str(a ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(a ) == 9 assert str(a ) == "->".join(str(a ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): snake_case = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(a ) == "->".join(str(a ) for i in range(-8 , 1 ) ) def __UpperCamelCase ( ) ->None: snake_case = [ -9, 100, Node(7734_5112 ), '''dlrow olleH''', 7, 5555, 0, -192.55555, '''Hello, world!''', 77.9, Node(10 ), None, None, 12.20, ] snake_case = LinkedList() for i in test_input: linked_list.insert_tail(a ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(a ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head snake_case = linked_list.delete_head() assert result == -9 assert ( str(a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail snake_case = linked_list.delete_tail() assert result == 12.2 assert ( str(a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list snake_case = linked_list.delete_nth(10 ) assert result is None assert ( str(a ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(a ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(a ) assert ( str(a ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(a ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def __UpperCamelCase ( ) ->Tuple: from doctest import testmod testmod() snake_case = LinkedList() linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(a ) print('''\nReading/changing Node data using indexing:''' ) print(f"""Element at Position 1: {linked_list[1]}""" ) snake_case = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(a ) print(f"""length of linked_list is : {len(a )}""" ) if 
__name__ == "__main__": main()
720
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]: snake_case = original_name.split('''.''' )[0] snake_case = key.split('''.''' ) snake_case = int(key_list[key_list.index(a ) - 2] ) snake_case = int(key_list[key_list.index(a ) - 1] ) snake_case = orig_block_num - offset snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def __UpperCamelCase ( a : Tuple ) ->Dict: snake_case = OrderedDict() snake_case , snake_case = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): snake_case = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 snake_case = key[: key.find('''proj''' )] snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" ) snake_case = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: snake_case = '''poolformer.encoder.''' + key if "mlp.fc1" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' ) if "norm2" in key: snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: snake_case = key.replace('''head''' , '''classifier''' ) snake_case = value return new_state_dict def __UpperCamelCase ( ) ->Optional[int]: snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case = Image.open(requests.get(a , stream=a ).raw ) return image @torch.no_grad() def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]: snake_case = PoolFormerConfig() # set attributes based on model_name snake_case = '''huggingface/label-files''' snake_case = model_name[-3:] snake_case = 1000 snake_case = '''imagenet-1k-id2label.json''' snake_case = (1, 1000) # set config attributes snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} if size == "s12": snake_case = [2, 2, 6, 2] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s24": snake_case = [4, 4, 12, 4] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 0.9 elif size == "s36": snake_case = [6, 6, 18, 6] snake_case = [64, 128, 320, 512] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.9 elif size == "m36": snake_case = [6, 6, 18, 6] snake_case = [96, 192, 384, 768] snake_case = 4.0 
snake_case = 1e-6 snake_case = 0.95 elif size == "m48": snake_case = [8, 8, 24, 8] snake_case = [96, 192, 384, 768] snake_case = 4.0 snake_case = 1e-6 snake_case = 0.95 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) # Prepare image snake_case = prepare_img() snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict snake_case = torch.load(a , map_location=torch.device('''cpu''' ) ) # rename keys snake_case = rename_keys(a ) # create HuggingFace model and load state dict snake_case = PoolFormerForImageClassification(a ) model.load_state_dict(a ) model.eval() # Define image processor snake_case = PoolFormerImageProcessor(crop_pct=a ) snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass snake_case = model(a ) snake_case = outputs.logits # define expected logit slices for different models if size == "s12": snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": snake_case = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": snake_case = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": snake_case = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , a , atol=1e-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(a ).mkdir(exist_ok=a ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) _lowercase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
44
0
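The list above reverses itself in place by re-pointing each next link backwards while walking the chain. A standalone sketch of that idiom with a bare node class:

class N:
    def __init__(self, data, next=None):
        self.data, self.next = data, next

head, prev = N(1, N(2, N(3))), None
while head:
    head.next, prev, head = prev, head, head.next  # re-link one node per step
assert [prev.data, prev.next.data, prev.next.next.data] == [3, 2, 1]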
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () _lowercase = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). _lowercase = [0, 25, 50] _lowercase = [25, 50, 75] _lowercase = fuzz.membership.trimf(X, abca) _lowercase = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. _lowercase = np.ones(75) _lowercase = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) _lowercase = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) _lowercase = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) _lowercase = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) _lowercase = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] _lowercase = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) _lowercase = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] _lowercase = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] _lowercase = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
721
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow _lowercase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) _lowercase = logging.getLogger() def __UpperCamelCase ( ) ->Tuple: snake_case = argparse.ArgumentParser() parser.add_argument('''-f''' ) snake_case = parser.parse_args() return args.f def __UpperCamelCase ( a : Dict , a : Tuple="eval" ) ->List[Any]: snake_case = os.path.join(a , f"""{split}_results.json""" ) if os.path.exists(a ): with open(a , '''r''' ) as f: return json.load(a ) raise ValueError(f"""can't find {path}""" ) _lowercase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _lowercase ( __a ): def UpperCamelCase ( self ) -> List[str]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_glue.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) @slow def UpperCamelCase ( self ) -> List[Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_clm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 1_00 ) @slow def UpperCamelCase ( self ) -> int: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(A__ , '''argv''' , A__ ): run_summarization_flax.main() snake_case = get_results(A__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt 
--validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_mlm_flax.main() snake_case = get_results(A__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def UpperCamelCase ( self ) -> Dict: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(A__ , '''argv''' , A__ ): run_ta_mlm_flax.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 ) @slow def UpperCamelCase ( self ) -> int: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case = 7 if get_gpu_count() > 1 else 2 snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(A__ , '''argv''' , A__ ): run_flax_ner.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def UpperCamelCase ( self ) -> Any: snake_case = self.get_auto_remove_tmp_dir() snake_case = F""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(A__ , '''argv''' , A__ ): run_qa.main() snake_case = get_results(A__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
44
0
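For membership arrays defined on the same universe, the fuzzy union and intersection computed above reduce to elementwise max and min; a dependency-free sketch with numpy:

import numpy as np

young = np.array([1.0, 0.5, 0.0])         # toy membership values
middle_aged = np.array([0.0, 0.5, 1.0])
assert np.maximum(young, middle_aged).tolist() == [1.0, 0.5, 1.0]  # union
assert np.minimum(young, middle_aged).tolist() == [0.0, 0.5, 0.0]  # intersection
assert (1 - young).tolist() == [0.0, 0.5, 1.0]                     # complement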
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": _lowercase = pd.read_csv('sample_data.csv', header=None) _lowercase = df.shape[:1][0] # If you're using some other dataset input the target column _lowercase = df.iloc[:, 1:2] _lowercase = actual_data.values.reshape(len_data, 1) _lowercase = MinMaxScaler().fit_transform(actual_data) _lowercase = 10 _lowercase = 5 _lowercase = 20 _lowercase = len_data - periods * look_back _lowercase = actual_data[:division] _lowercase = actual_data[division - look_back :] _lowercase , _lowercase = [], [] _lowercase , _lowercase = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) _lowercase = np.array(train_x) _lowercase = np.array(test_x) _lowercase = np.array([list(i.ravel()) for i in train_y]) _lowercase = np.array([list(i.ravel()) for i in test_y]) _lowercase = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss='mean_squared_error', optimizer='adam') _lowercase = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) _lowercase = model.predict(x_test)
700
'''simple docstring''' from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS _lowercase = logging.get_logger(__name__) _lowercase = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class _lowercase ( __a ): def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]: super().__init__(*A__ , **A__ ) if config is None: assert isinstance(self.model , A__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) snake_case = self.model.config else: snake_case = config snake_case = data_args snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss snake_case = label_smoothed_nll_loss def UpperCamelCase ( self , A__ ) -> Tuple: if self.optimizer is None: snake_case = ['''bias''', '''LayerNorm.weight'''] snake_case = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] snake_case = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: snake_case = Adafactor snake_case = {'''scale_parameter''': False, '''relative_step''': False} else: snake_case = AdamW snake_case = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } snake_case = self.args.learning_rate if self.sharded_ddp: snake_case = OSS( params=A__ , optim=A__ , **A__ , ) else: snake_case = optimizer_cls(A__ , **A__ ) if self.lr_scheduler is None: snake_case = self._get_lr_scheduler(A__ ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": snake_case = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: snake_case = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ ) return scheduler def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token snake_case = model(**A__ , use_cache=A__ )[0] snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2] else: # compute label smoothed loss snake_case = model(**A__ , use_cache=A__ )[0] snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 ) snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def UpperCamelCase ( self , A__ , A__ ) -> Any: snake_case = inputs.pop('''labels''' ) snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) return loss def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: snake_case = self._prepare_inputs(A__ ) snake_case = { '''max_length''': self.data_args.val_max_target_length if 
self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: snake_case = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) snake_case = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data snake_case , snake_case = self._compute_loss(A__ , A__ , A__ ) snake_case = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) snake_case = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def UpperCamelCase ( self , A__ , A__ ) -> List[str]: # If PAD token is not defined at least EOS token has to be defined snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F""" padded to `max_length`={max_length}""" ) snake_case = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) snake_case = tensor return padded_tensor
44
0
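The (x, y) training pairs above come from a sliding look-back window over the series; a sketch of that windowing with assumed toy sizes:

import numpy as np

series = np.arange(10, dtype=float).reshape(-1, 1)  # toy series of 10 points
look_back, forward_days = 3, 2
x = [series[i : i + look_back] for i in range(len(series) - look_back - forward_days + 1)]
y = [series[i + look_back : i + look_back + forward_days].ravel() for i in range(len(x))]
assert x[0].shape == (3, 1) and y[0].tolist() == [3.0, 4.0]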
'''simple docstring''' from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , __a , ) class _lowercase ( __a ): _UpperCAmelCase = RobertaConfig _UpperCAmelCase = '''roberta''' def __init__( self , A__ ) -> Any: super().__init__(A__ ) snake_case = RobertaEmbeddings(A__ ) self.init_weights() @add_start_docstrings( '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. ''' , __a , ) class _lowercase ( __a ): _UpperCAmelCase = RobertaConfig _UpperCAmelCase = '''roberta''' def __init__( self , A__ ) -> str: super().__init__(A__ ) snake_case = config.num_labels snake_case = config.num_hidden_layers snake_case = DeeRobertaModel(A__ ) snake_case = nn.Dropout(config.hidden_dropout_prob ) snake_case = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(A__ ) def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=-1 , A__=False , ) -> Union[str, Any]: snake_case = self.num_layers try: snake_case = self.roberta( A__ , attention_mask=A__ , token_type_ids=A__ , position_ids=A__ , head_mask=A__ , inputs_embeds=A__ , ) snake_case = outputs[1] snake_case = self.dropout(A__ ) snake_case = self.classifier(A__ ) snake_case = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: snake_case = e.message snake_case = e.exit_layer snake_case = outputs[0] if not self.training: snake_case = entropy(A__ ) snake_case = [] snake_case = [] if labels is not None: if self.num_labels == 1: # We are doing regression snake_case = MSELoss() snake_case = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: snake_case = CrossEntropyLoss() snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits snake_case = [] for highway_exit in outputs[-1]: snake_case = highway_exit[0] if not self.training: highway_logits_all.append(A__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression snake_case = MSELoss() snake_case = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: snake_case = CrossEntropyLoss() snake_case = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(A__ ) if train_highway: snake_case = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: snake_case = (loss,) + outputs if not self.training: snake_case = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: snake_case = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
701
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def __UpperCamelCase ( a : List[str] ) ->str: snake_case = [] for line in lines: snake_case = re.sub(R'''#.*''' , '''''' , a ) # remove comments if line: filtered_lines.append(a ) snake_case = '''\n'''.join(a ) # Make a hash from all this code snake_case = full_str.encode('''utf-8''' ) return shaaaa(a ).hexdigest() # get importable module names and hash for caching _lowercase = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions _lowercase = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _lowercase = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name _lowercase = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
44
0
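The loader above fingerprints each packaged module by hashing its source with comments and blank lines stripped. A minimal reproduction of that step, assuming the masked hash import is sha256:

import re
from hashlib import sha256

lines = ["x = 1  # set x", "", "y = x + 1"]
filtered = []
for line in lines:
    line = re.sub(r"#.*", "", line)  # remove comments
    if line:
        filtered.append(line)
digest = sha256("\n".join(filtered).encode("utf-8")).hexdigest()
assert len(digest) == 64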
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { 'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json', 'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json', 'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json', 'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class _lowercase ( __a ): _UpperCAmelCase = '''mobilenet_v2''' def __init__( self , A__=3 , A__=2_24 , A__=1.0 , A__=8 , A__=8 , A__=6 , A__=32 , A__=True , A__=True , A__="relu6" , A__=True , A__=0.8 , A__=0.0_2 , A__=0.0_0_1 , A__=2_55 , **A__ , ): super().__init__(**A__ ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) snake_case = num_channels snake_case = image_size snake_case = depth_multiplier snake_case = depth_divisible_by snake_case = min_depth snake_case = expand_ratio snake_case = output_stride snake_case = first_layer_is_expansion snake_case = finegrained_output snake_case = hidden_act snake_case = tf_padding snake_case = classifier_dropout_prob snake_case = initializer_range snake_case = layer_norm_eps snake_case = semantic_loss_ignore_index class _lowercase ( __a ): _UpperCAmelCase = version.parse('''1.11''' ) @property def UpperCamelCase ( self ): return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def UpperCamelCase ( self ): if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def UpperCamelCase ( self ): return 1e-4
702
'''simple docstring''' _lowercase = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
44
0
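A pinned-dependency table like the one above is usually consumed by mapping bare package names back to their requirement strings; an illustrative sketch (the helper name and entries here are assumptions):

deps = {"torch": "torch>=1.4", "numpy": "numpy", "Pillow": "Pillow"}

def deps_list(*pkgs):
    # look up the pinned requirement string for each package name
    return [deps[pkg] for pkg in pkgs]

assert deps_list("torch", "numpy") == ["torch>=1.4", "numpy"]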
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]: snake_case = parent snake_case = batch_size snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = is_training snake_case = use_labels snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = type_sequence_label_size snake_case = initializer_range snake_case = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case = (image_size // patch_size) ** 2 snake_case = num_patches + 1 def UpperCamelCase ( self ) -> int: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> int: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = TFViTModel(config=A__ ) snake_case = model(A__ , training=A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) snake_case = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]: snake_case = self.type_sequence_label_size snake_case = TFViTForImageClassification(A__ ) snake_case = model(A__ , labels=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case = 1 snake_case = TFViTForImageClassification(A__ ) snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.prepare_config_and_inputs() snake_case , snake_case , snake_case = config_and_inputs snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () _UpperCAmelCase = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase ( self ) -> List[Any]: snake_case = TFViTModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> int: pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> str: pass def UpperCamelCase ( self ) -> Union[str, Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) ) def UpperCamelCase ( self ) -> List[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def UpperCamelCase ( self ) -> Any: snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(A__ ) def __UpperCamelCase ( ) ->Any: snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ) -> Optional[int]: return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Dict: snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) snake_case = self.default_image_processor snake_case = prepare_img() snake_case = image_processor(images=A__ , 
return_tensors='''tf''' ) # forward pass snake_case = model(**A__ ) # verify the logits snake_case = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
703
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = IFInpaintingSuperResolutionPipeline _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCamelCase ( self ) -> int: return self._get_superresolution_dummy_components() def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]: if str(A__ ).startswith('''mps''' ): snake_case = torch.manual_seed(A__ ) else: snake_case = torch.Generator(device=A__ ).manual_seed(A__ ) snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ ) snake_case = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCamelCase ( self ) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCamelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCamelCase ( self ) -> Optional[Any]: self._test_save_load_local() def UpperCamelCase ( self ) -> Dict: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = None _UpperCAmelCase = BloomTokenizerFast _UpperCAmelCase = BloomTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = '''tokenizer_file''' _UpperCAmelCase = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''} def UpperCamelCase ( self ) -> Dict: super().setUp() snake_case = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , **A__ ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self ) -> List[Any]: snake_case = self.get_rust_tokenizer() snake_case = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] snake_case = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]] snake_case = tokenizer.batch_encode_plus(A__ )['''input_ids'''] self.assertListEqual(A__ , A__ ) snake_case = tokenizer.batch_decode(A__ ) self.assertListEqual(A__ , A__ ) def UpperCamelCase ( self , A__=6 ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(A__ , max_length=A__ ) tokenizer_r.encode_plus(A__ , max_length=A__ ) tokenizer_r.batch_encode_plus(A__ , max_length=A__ ) tokenizer_r.encode(A__ , max_length=A__ ) tokenizer_r.batch_encode_plus(A__ , max_length=A__ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) snake_case = None # Hotfixing padding = None self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) # Pair input self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) def UpperCamelCase ( self ) -> Any: snake_case = self.get_rust_tokenizer() snake_case = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=A__ ) snake_case = next(iter(A__ ) )['''premise'''] # pick up one data snake_case = list(sample_data.values() ) snake_case = list(map(tokenizer.encode , A__ ) ) snake_case = [tokenizer.decode(A__ , clean_up_tokenization_spaces=A__ ) for x in output_tokens] 
self.assertListEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Optional[Any]: # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positional embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
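# --- Illustrative sketch (not part of the test file above) ---
# The encode/decode round trip checked in the dataset test above, written out
# with plain names. "bigscience/tokenizer" is the checkpoint the test itself
# loads; running this needs network access. Byte-level BPE tokenizers such as
# BLOOM's should reproduce plain text exactly on a round trip.
from transformers import AutoTokenizer

def bloom_roundtrip_demo():
    tok = AutoTokenizer.from_pretrained("bigscience/tokenizer")
    texts = ["The quick brown fox", "jumps over the lazy dog"]
    ids = tok.batch_encode_plus(texts)["input_ids"]
    decoded = tok.batch_decode(ids, clean_up_tokenization_spaces=False)
    assert decoded == texts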
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _lowercase = logging.get_logger(__name__) class _lowercase ( __a ): def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]: snake_case = feature_size snake_case = sampling_rate snake_case = padding_value snake_case = kwargs.pop('''padding_side''' , '''right''' ) snake_case = kwargs.pop('''return_attention_mask''' , A__ ) super().__init__(**A__ ) def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): snake_case = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has to be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) snake_case = processed_features[self.model_input_names[0]] snake_case = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(A__ ) == 0: if return_attention_mask: snake_case = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch snake_case = required_input[0] if isinstance(A__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. snake_case = 0 while len(required_input[index] ) == 0: index += 1 if index < len(A__ ): snake_case = required_input[index][0] if return_tensors is None: if is_tf_tensor(A__ ): snake_case = '''tf''' elif is_torch_tensor(A__ ): snake_case = '''pt''' elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ): snake_case = '''np''' else: raise ValueError( F"""type of {first_element} unknown: {type(A__ )}. 
""" '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): snake_case = to_numpy(A__ ) else: snake_case = [to_numpy(A__ ) for v in value] # Convert padding_strategy in PaddingStrategy snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ ) snake_case = processed_features[self.model_input_names[0]] snake_case = len(A__ ) if not all(len(A__ ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) snake_case = [] for i in range(A__ ): snake_case = {k: v[i] for k, v in processed_features.items()} # truncation snake_case = self._truncate( A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , ) truncated_inputs.append(A__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) snake_case = PaddingStrategy.MAX_LENGTH snake_case = {} for i in range(A__ ): # padding snake_case = self._pad( truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , ) for key, value in outputs.items(): if key not in batch_outputs: snake_case = [] if value.dtype is np.dtype(np.floataa ): snake_case = value.astype(np.floataa ) batch_outputs[key].append(A__ ) return BatchFeature(A__ , tensor_type=A__ ) def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict: snake_case = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: snake_case = len(A__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: snake_case = np.ones(len(A__ ) , dtype=np.intaa ) if needs_to_be_padded: snake_case = max_length - len(A__ ) if self.padding_side == "right": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (0, difference) ) snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: snake_case = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) snake_case = np.pad( A__ , A__ , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return processed_features def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) snake_case = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): snake_case = ((max_length // pad_to_multiple_of) + 1) * 
pad_to_multiple_of snake_case = len(A__ ) > max_length if needs_to_be_truncated: snake_case = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: snake_case = processed_features['''attention_mask'''][:max_length] return processed_features def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]: # Get padding strategy if padding is not False: if padding is True: snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(A__ , A__ ): snake_case = PaddingStrategy(A__ ) elif isinstance(A__ , A__ ): snake_case = padding else: snake_case = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
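# --- Illustrative sketch (not part of the library file above) ---
# The core of the _pad method with plain names: grow a 1-D feature array to
# max_length with a constant padding value on the chosen side, and extend the
# attention mask to match (1 = real value, 0 = padding).
import numpy as np

def pad_one_dimensional(values, max_length, padding_value=0.0, padding_side="right"):
    attention_mask = np.ones(len(values), dtype=np.int32)
    difference = max_length - len(values)
    if difference <= 0:
        return values, attention_mask
    if padding_side == "right":
        attention_mask = np.pad(attention_mask, (0, difference))
        values = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    elif padding_side == "left":
        attention_mask = np.pad(attention_mask, (difference, 0))
        values = np.pad(values, (difference, 0), "constant", constant_values=padding_value)
    else:
        raise ValueError("Invalid padding strategy:" + str(padding_side))
    return values, attention_mask

# pad_one_dimensional(np.array([0.1, 0.2, 0.3]), 5) ->
#   (array([0.1, 0.2, 0.3, 0. , 0. ]), array([1, 1, 1, 0, 0], dtype=int32))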
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) _lowercase = { 'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'], 'processing_trocr': ['TrOCRProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ 'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST', 'TrOCRForCausalLM', 'TrOCRPreTrainedModel', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowercase ( yaml.SafeLoader ): def UpperCamelCase ( self , A__ ) -> List[str]: snake_case = [self.constructed_objects[key_node] for key_node, _ in node.value] snake_case = [tuple(A__ ) if isinstance(A__ , A__ ) else key for key in keys] snake_case = Counter(A__ ) snake_case = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def UpperCamelCase ( self , A__ , A__=False ) -> List[Any]: snake_case = super().construct_mapping(A__ , deep=A__ ) self._check_no_duplicates_on_constructed_node(A__ ) return mapping def __UpperCamelCase ( a : str ) ->Tuple[Optional[str], str]: snake_case = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: snake_case = full_content[1:].index('''---''' ) + 1 snake_case = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(a ) class _lowercase ( __a ): # class attributes _UpperCAmelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case , snake_case = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(A__ ) else: return cls() def UpperCamelCase ( self , A__ ) -> str: if path.exists(): with open(A__ , encoding='''utf-8''' ) as readme_file: snake_case = readme_file.read() else: snake_case = None snake_case = self._to_readme(A__ ) with open(A__ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(A__ ) def UpperCamelCase ( self , A__ = None ) -> str: if readme_content is not None: snake_case , snake_case = _split_yaml_from_readme(A__ ) snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata": snake_case = yaml.load(A__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields snake_case = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**A__ ) def UpperCamelCase ( self ) -> str: return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=A__ , allow_unicode=A__ , encoding='''utf-8''' , ).decode('''utf-8''' ) _lowercase = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 
'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser _lowercase = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') _lowercase = ap.parse_args() _lowercase = Path(args.readme_filepath) _lowercase = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
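# --- Illustrative sketch (not part of the module above) ---
# What _split_yaml_from_readme does, with plain names: peel a leading
# "---" ... "---" front-matter block off a README and return (yaml_text, rest).
def split_front_matter(readme_text):
    lines = readme_text.splitlines()
    if lines and lines[0] == "---" and "---" in lines[1:]:
        end = lines[1:].index("---") + 1
        return "\n".join(lines[1:end]), "\n".join(lines[end + 1 :])
    return None, "\n".join(lines)

# split_front_matter("---\nlicense: mit\n---\n# Title") -> ("license: mit", "# Title")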
'''simple docstring''' import argparse import copy def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple: snake_case = {} with open(a ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[1], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[0], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __UpperCamelCase ( a : Dict , a : Tuple ) ->int: with open(a ) as f: snake_case = f.read(1 ) snake_case = start_node snake_case = [] snake_case = start_node snake_case = 0 while visiting not in first_solution: snake_case = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(a ) and k[0] not in first_solution: snake_case = k[1] snake_case = k[0] first_solution.append(a ) snake_case = distance_of_first_solution + int(a ) snake_case = best_node first_solution.append(a ) snake_case = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def __UpperCamelCase ( a : Optional[int] , a : str ) ->str: snake_case = [] for n in solution[1:-1]: snake_case = solution.index(a ) for kn in solution[1:-1]: snake_case = solution.index(a ) if n == kn: continue snake_case = copy.deepcopy(a ) snake_case = kn snake_case = n snake_case = 0 for k in _tmp[:-1]: snake_case = _tmp[_tmp.index(a ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case = distance + int(i[1] ) _tmp.append(a ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __UpperCamelCase ( a : Any , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] ) ->List[Any]: snake_case = 1 snake_case = first_solution snake_case = [] snake_case = distance_of_first_solution snake_case = solution while count <= iters: snake_case = find_neighborhood(a , a ) snake_case = 0 snake_case = neighborhood[index_of_best_solution] snake_case = len(a ) - 1 snake_case = False while not found: snake_case = 0 while i < len(a ): if best_solution[i] != solution[i]: snake_case = best_solution[i] snake_case = solution[i] break snake_case = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case = True snake_case = best_solution[:-1] snake_case = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case = cost snake_case = solution else: snake_case = index_of_best_solution + 1 snake_case = neighborhood[index_of_best_solution] if len(a ) >= size: tabu_list.pop(0 ) snake_case = count + 1 return best_solution_ever, best_cost def __UpperCamelCase ( a : Union[str, Any]=None ) ->Optional[Any]: snake_case = generate_neighbours(args.File ) snake_case , snake_case = generate_first_solution( args.File , a ) snake_case , snake_case = tabu_search( a , a , a , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" 
) if __name__ == "__main__": _lowercase = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
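# --- Illustrative sketch (not part of the script above) ---
# The neighborhood explored by the tabu search, with plain names: every tour
# reachable by swapping two interior cities of the current tour (the first and
# last entries stay fixed because the tour returns to its starting city).
def swap_neighborhood(tour):
    neighbors = []
    for i in range(1, len(tour) - 1):
        for j in range(i + 1, len(tour) - 1):
            candidate = list(tour)
            candidate[i], candidate[j] = candidate[j], candidate[i]
            neighbors.append(candidate)
    return neighbors

# swap_neighborhood(["a", "b", "c", "a"]) -> [["a", "c", "b", "a"]]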
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = CodeGenTokenizer _UpperCAmelCase = CodeGenTokenizerFast _UpperCAmelCase = True _UpperCAmelCase = {'''add_prefix_space''': True} _UpperCAmelCase = False def UpperCamelCase ( self ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] snake_case = dict(zip(A__ , range(len(A__ ) ) ) ) snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] snake_case = {'''unk_token''': '''<unk>'''} snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A__ ) ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , A__ ) -> Tuple: snake_case = '''lower newer''' snake_case = '''lower newer''' return input_text, output_text def UpperCamelCase ( self ) -> List[Any]: snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case = '''lower newer''' snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) self.assertListEqual(A__ , A__ ) snake_case = tokens + [tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self ) -> Optional[int]: if not self.test_rust_tokenizer: return snake_case = self.get_tokenizer() snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = '''lower newer''' # Testing tokenization snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids without special tokens snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids with special tokens snake_case = self.get_rust_tokenizer(add_prefix_space=A__ ) snake_case = tokenizer.encode(A__ , add_prefix_space=A__ ) snake_case = rust_tokenizer.encode(A__ ) self.assertListEqual(A__ , A__ ) # Testing the unknown token snake_case = tokens + [rust_tokenizer.unk_token] snake_case = [14, 15, 10, 9, 3, 2, 15, 19] 
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def UpperCamelCase ( self , A__=15 ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Simple input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) # Pair input self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' ) # Pair input self.assertRaises( A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , ) def UpperCamelCase ( self ) -> Tuple: snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input snake_case = '''This is a simple input''' snake_case = ['''This is a simple input looooooooong''', '''This is a simple input'''] snake_case = ('''This is a simple input''', '''This is a pair''') snake_case = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] snake_case = tokenizer.pad_token_id snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in 
out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def UpperCamelCase ( self ) -> str: snake_case = '''$$$''' snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ ) snake_case = '''This is a simple input''' snake_case = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case = tokenizer.bos_token_id snake_case = tokenizer(A__ ) snake_case = tokenizer(A__ ) self.assertEqual(out_s.input_ids[0] , A__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) snake_case = tokenizer.decode(out_s.input_ids ) snake_case = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def UpperCamelCase ( self ) -> Any: snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' snake_case = '''\nif len_a > len_b: result = a\nelse: result = b''' snake_case = tokenizer.encode(A__ ) snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ ) self.assertEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: pass
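# --- Illustrative sketch (not part of the test file above) ---
# The idea behind decode(..., truncate_before_pattern=...) exercised in the
# slow test: cut generated text at the earliest match of any stop pattern.
# The real CodeGen implementation is more involved; this is the simple core.
import re

def truncate_before(text, patterns):
    cut = len(text)
    for pattern in patterns:
        match = re.search(pattern, text, flags=re.MULTILINE)
        if match:
            cut = min(cut, match.start())
    return text[:cut]

# truncate_before("if a > b: r = a\nelse: r = b\n\n\n\n# next", ["\n\n\n"])
#   -> "if a > b: r = a\nelse: r = b"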
'''simple docstring''' import unittest from transformers import DonutProcessor _lowercase = 'naver-clova-ix/donut-base' class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self ) -> Dict: snake_case = DonutProcessor.from_pretrained(A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } snake_case = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) snake_case = self.processor.tokenajson(A__ ) self.assertDictEqual(A__ , A__ )
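# --- Illustrative sketch (not part of the test file above) ---
# A deliberately simplified take on the token-to-JSON conversion tested above,
# for flat fields only; the real DonutProcessor.token2json also handles nested
# tags and <sep/>-separated lists, which this version does not.
import re

def flat_tags_to_dict(sequence):
    return {key: value for key, value in re.findall(r"<s_(.+?)>(.*?)</s_\1>", sequence)}

# flat_tags_to_dict("<s_name>John Doe</s_name><s_age>99</s_age>")
#   -> {"name": "John Doe", "age": "99"}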
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowercase : def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]: snake_case = parent snake_case = batch_size snake_case = image_size snake_case = patch_size snake_case = num_channels snake_case = is_training snake_case = use_labels snake_case = hidden_size snake_case = num_hidden_layers snake_case = num_attention_heads snake_case = intermediate_size snake_case = hidden_act snake_case = hidden_dropout_prob snake_case = attention_probs_dropout_prob snake_case = type_sequence_label_size snake_case = initializer_range snake_case = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case = (image_size // patch_size) ** 2 snake_case = num_patches + 1 def UpperCamelCase ( self ) -> int: snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case = None if self.use_labels: snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self ) -> int: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = TFViTModel(config=A__ ) snake_case = model(A__ , training=A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) snake_case = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]: snake_case = self.type_sequence_label_size snake_case = TFViTForImageClassification(A__ ) snake_case = model(A__ , labels=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
snake_case = self.image_size // 2 snake_case = pixel_values[:, :, :image_size, :image_size] snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case = 1 snake_case = TFViTForImageClassification(A__ ) snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.prepare_config_and_inputs() snake_case , snake_case , snake_case = config_and_inputs snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowercase ( __a , __a , unittest.TestCase ): _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () _UpperCAmelCase = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = False def UpperCamelCase ( self ) -> List[Any]: snake_case = TFViTModelTester(self ) snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def UpperCamelCase ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> int: pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase ( self ) -> str: pass def UpperCamelCase ( self ) -> Union[str, Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) ) def UpperCamelCase ( self ) -> List[Any]: snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case = model_class(A__ ) snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case = [*signature.parameters.keys()] snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A__ ) def UpperCamelCase ( self ) -> Union[str, Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def UpperCamelCase ( self ) -> Any: snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(A__ ) def __UpperCamelCase ( ) ->Any: snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _lowercase ( unittest.TestCase ): @cached_property def UpperCamelCase ( self ) -> Optional[int]: return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def UpperCamelCase ( self ) -> Dict: snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) snake_case = self.default_image_processor snake_case = prepare_img() snake_case = image_processor(images=A__ , 
return_tensors='''tf''' ) # forward pass snake_case = model(**A__ ) # verify the logits snake_case = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
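# --- Illustrative sketch (not part of the test file above) ---
# What the interpolate_pos_encoding flag tested above amounts to: resize the
# grid of patch position embeddings (bicubic) so a ViT trained at one
# resolution can accept another. Function and variable names are mine, and the
# real TFViT code also carries the [CLS] embedding through unchanged.
import tensorflow as tf

def resize_patch_position_embeddings(pos_embed, old_grid, new_grid):
    # pos_embed: (1, old_grid * old_grid, dim), patch embeddings only, no [CLS].
    dim = pos_embed.shape[-1]
    grid = tf.reshape(pos_embed, (1, old_grid, old_grid, dim))
    grid = tf.image.resize(grid, size=(new_grid, new_grid), method="bicubic")
    return tf.reshape(grid, (1, new_grid * new_grid, dim))

# resize_patch_position_embeddings(tf.zeros((1, 196, 768)), 14, 7).shape
#   -> TensorShape([1, 49, 768])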
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowercase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _lowercase = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def __UpperCamelCase ( a : Dict=True ) ->str: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) ) class _lowercase ( __a ): _UpperCAmelCase = None _UpperCAmelCase = None def UpperCamelCase ( self , A__ , A__ ) -> str: with TemporaryDirectory() as tmp_dir: snake_case = dataset_module_factory(A__ , cache_dir=A__ ) snake_case = import_main_class(dataset_module.module_path , dataset=A__ ) snake_case = builder_cls( cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , ) snake_case = '''/'''.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ), config.DATASET_INFO_FILENAME, ] ) snake_case = cached_path(A__ , cache_dir=A__ ) self.assertTrue(os.path.exists(A__ ) ) @pytest.mark.integration def __UpperCamelCase ( a : List[str] ) ->Any: snake_case = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple''' snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam snake_case = None builder_instance.download_and_prepare() snake_case = builder_instance.as_dataset() assert ds @pytest.mark.integration def __UpperCamelCase ( a : Any ) ->Union[str, Any]: snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a ) snake_case = import_main_class(dataset_module.module_path , dataset=a ) snake_case = builder_cls( cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , ) snake_case = builder_instance.as_streaming_dataset() assert ds assert isinstance(a , a ) assert "train" in ds assert isinstance(ds['''train'''] , a ) assert next(iter(ds['''train'''] ) )
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline _UpperCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] _UpperCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] _UpperCAmelCase = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _UpperCAmelCase = False @property def UpperCamelCase ( self ) -> Any: return 32 @property def UpperCamelCase ( self ) -> Optional[int]: return 32 @property def UpperCamelCase ( self ) -> Union[str, Any]: return self.time_input_dim @property def UpperCamelCase ( self ) -> List[Any]: return self.time_input_dim * 4 @property def UpperCamelCase ( self ) -> List[str]: return 1_00 @property def UpperCamelCase ( self ) -> int: torch.manual_seed(0 ) snake_case = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } snake_case = UNetaDConditionModel(**A__ ) return model @property def UpperCamelCase ( self ) -> Union[str, Any]: return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def UpperCamelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) snake_case = VQModel(**self.dummy_movq_kwargs ) return model def UpperCamelCase ( self ) -> Any: snake_case = self.dummy_unet snake_case = self.dummy_movq snake_case = { '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } snake_case = DDIMScheduler(**A__ ) snake_case = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def UpperCamelCase ( self , A__ , 
A__=0 ) -> Tuple: snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A__ ) ).to(A__ ) snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( A__ ) # create init_image snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ ) snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case = Image.fromarray(np.uinta(A__ ) ).convert('''RGB''' ).resize((2_56, 2_56) ) # create hint snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ ) if str(A__ ).startswith('''mps''' ): snake_case = torch.manual_seed(A__ ) else: snake_case = torch.Generator(device=A__ ).manual_seed(A__ ) snake_case = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def UpperCamelCase ( self ) -> int: snake_case = '''cpu''' snake_case = self.get_dummy_components() snake_case = self.pipeline_class(**A__ ) snake_case = pipe.to(A__ ) pipe.set_progress_bar_config(disable=A__ ) snake_case = pipe(**self.get_dummy_inputs(A__ ) ) snake_case = output.images snake_case = pipe( **self.get_dummy_inputs(A__ ) , return_dict=A__ , )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self ) -> Optional[Any]: snake_case = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) snake_case = init_image.resize((5_12, 5_12) ) snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) snake_case = torch.from_numpy(np.array(A__ ) ).float() / 2_55.0 snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) snake_case = '''A robot, 4k photo''' snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(A__ ) snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) snake_case = pipeline.to(A__ ) pipeline.set_progress_bar_config(disable=A__ ) snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 ) snake_case , snake_case = pipe_prior( A__ , image=A__ , strength=0.8_5 , generator=A__ , negative_prompt='''''' , ).to_tuple() snake_case = pipeline( image=A__ , 
image_embeds=A__ , negative_image_embeds=A__ , hint=A__ , generator=A__ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='''np''' , ) snake_case = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(A__ , A__ )
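# --- Illustrative sketch (not part of the test file above) ---
# The hint preparation used in the slow test, with plain names: a PIL image
# becomes a (1, 3, H, W) float tensor scaled to [0, 1], which is the layout
# the controlnet pipeline expects for its hint argument.
import numpy as np
import torch

def image_to_hint(pil_image):
    hint = torch.from_numpy(np.array(pil_image)).float() / 255.0  # (H, W, 3)
    return hint.permute(2, 0, 1).unsqueeze(0)                     # (1, 3, H, W)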
'''simple docstring''' def __UpperCamelCase ( a : int , a : int ) ->int: while b: snake_case , snake_case = b, a % b return a def __UpperCamelCase ( a : int , a : int ) ->int: return a if b == 0 else euclidean_gcd_recursive(a , a % b ) def __UpperCamelCase ( ) ->Optional[Any]: print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _lowercase ( unittest.TestCase ): def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , A__=None , A__=True , ) -> Dict: snake_case = size if size is not None else {'''shortest_edge''': 20} snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size snake_case = do_center_crop snake_case = crop_size snake_case = do_flip_channel_order def UpperCamelCase ( self ) -> Tuple: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = MobileViTImageProcessor if is_vision_available() else None def UpperCamelCase ( self ) -> str: snake_case = MobileViTImageProcessingTester(self ) @property def UpperCamelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ) -> Dict: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A__ , '''do_resize''' ) ) self.assertTrue(hasattr(A__ , '''size''' ) ) self.assertTrue(hasattr(A__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(A__ , '''center_crop''' ) ) self.assertTrue(hasattr(A__ , '''do_flip_channel_order''' ) ) def UpperCamelCase ( self ) -> Optional[Any]: snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCamelCase ( self ) -> Any: pass def UpperCamelCase ( self ) -> Dict: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ ) for image in image_inputs: self.assertIsInstance(A__ , Image.Image ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase ( self ) -> Dict: # 
Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , np.ndarray ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase ( self ) -> str: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , torch.Tensor ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
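# --- Illustrative sketch (not part of the test file above) ---
# What the do_flip_channel_order option being tested does, with plain names:
# reverse the channel axis of an image array (RGB <-> BGR). Assumes a
# channels-first (num_channels, height, width) layout.
import numpy as np

def flip_channel_order(image):
    return image[::-1, :, :]

# flip_channel_order(np.arange(12).reshape(3, 2, 2))[0, 0, 0] -> 8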
710
'''simple docstring''' import argparse import copy def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple: snake_case = {} with open(a ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[1], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case = [] _list.append([line.split()[0], line.split()[2]] ) snake_case = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def __UpperCamelCase ( a : Dict , a : Tuple ) ->int: with open(a ) as f: snake_case = f.read(1 ) snake_case = start_node snake_case = [] snake_case = start_node snake_case = 0 while visiting not in first_solution: snake_case = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(a ) and k[0] not in first_solution: snake_case = k[1] snake_case = k[0] first_solution.append(a ) snake_case = distance_of_first_solution + int(a ) snake_case = best_node first_solution.append(a ) snake_case = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def __UpperCamelCase ( a : Optional[int] , a : str ) ->str: snake_case = [] for n in solution[1:-1]: snake_case = solution.index(a ) for kn in solution[1:-1]: snake_case = solution.index(a ) if n == kn: continue snake_case = copy.deepcopy(a ) snake_case = kn snake_case = n snake_case = 0 for k in _tmp[:-1]: snake_case = _tmp[_tmp.index(a ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case = distance + int(i[1] ) _tmp.append(a ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def __UpperCamelCase ( a : Any , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] ) ->List[Any]: snake_case = 1 snake_case = first_solution snake_case = [] snake_case = distance_of_first_solution snake_case = solution while count <= iters: snake_case = find_neighborhood(a , a ) snake_case = 0 snake_case = neighborhood[index_of_best_solution] snake_case = len(a ) - 1 snake_case = False while not found: snake_case = 0 while i < len(a ): if best_solution[i] != solution[i]: snake_case = best_solution[i] snake_case = solution[i] break snake_case = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case = True snake_case = best_solution[:-1] snake_case = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case = cost snake_case = solution else: snake_case = index_of_best_solution + 1 snake_case = neighborhood[index_of_best_solution] if len(a ) >= size: tabu_list.pop(0 ) snake_case = count + 1 return best_solution_ever, best_cost def __UpperCamelCase ( a : Union[str, Any]=None ) ->Optional[Any]: snake_case = generate_neighbours(args.File ) snake_case , snake_case = generate_first_solution( args.File , a ) snake_case , snake_case = tabu_search( a , a , a , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" 
) if __name__ == "__main__": _lowercase = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
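The cell above is a tabu search over a whitespace-separated edge list (one "node_a node_b distance" triple per line): a greedy nearest-neighbour tour is built first, then the search repeatedly moves to the best non-tabu 2-swap neighbour. A minimal self-contained sketch of the same mechanic, assuming a plain distance matrix instead of the file format (all names here are illustrative, not the cell's):

import itertools

def tour_cost(dist, tour):
    # cost of the closed tour: consecutive legs plus the return edge
    return sum(dist[tour[i]][tour[i + 1]] for i in range(len(tour) - 1)) + dist[tour[-1]][tour[0]]

def tabu_search_sketch(dist, iters=50, tabu_size=5):
    n = len(dist)
    current = list(range(n))                                  # start from the identity tour
    best, best_cost = current[:], tour_cost(dist, current)
    tabu = []                                                 # recently used swap moves
    for _ in range(iters):
        candidates = []
        for i, j in itertools.combinations(range(1, n), 2):   # keep city 0 fixed
            if (i, j) in tabu:
                continue
            neighbour = current[:]
            neighbour[i], neighbour[j] = neighbour[j], neighbour[i]
            candidates.append((tour_cost(dist, neighbour), (i, j), neighbour))
        if not candidates:
            break
        cost, move, current = min(candidates)                 # best non-tabu neighbour, even if worse
        tabu.append(move)
        if len(tabu) > tabu_size:
            tabu.pop(0)                                       # expire the oldest move
        if cost < best_cost:
            best, best_cost = current[:], cost
    return best, best_cost

dist = [[0, 20, 18, 22], [20, 0, 15, 30], [18, 15, 0, 26], [22, 30, 26, 0]]
print(tabu_search_sketch(dist))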
44
0
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def __UpperCamelCase ( a : dict , a : str , a : set , a : set , a : dict , a : dict , a : PriorityQueue , a : dict , a : float | int , ) ->float | int: for nxt, d in graph[v]: if nxt in visited_forward: continue snake_case = cst_fwd.get(a , np.inf ) snake_case = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) snake_case = new_cost_f snake_case = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: snake_case = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __UpperCamelCase ( a : str , a : str , a : dict , a : dict ) ->int: snake_case = -1 snake_case = set() snake_case = set() snake_case = {source: 0} snake_case = {destination: 0} snake_case = {source: None} snake_case = {destination: None} snake_case = PriorityQueue() snake_case = PriorityQueue() snake_case = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): snake_case , snake_case = queue_forward.get() visited_forward.add(a ) snake_case , snake_case = queue_backward.get() visited_backward.add(a ) snake_case = pass_and_relaxation( a , a , a , a , a , a , a , a , a , ) snake_case = pass_and_relaxation( a , a , a , a , a , a , a , a , a , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: snake_case = shortest_distance return shortest_path_distance _lowercase = { 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } _lowercase = { 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
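The cell above runs Dijkstra from both endpoints at once and stops when the two frontiers certify the meeting cost, so it needs the backward adjacency list as a separate argument (the two module-level dicts at the end are a forward graph and its hand-built reverse). A small sketch, assuming the same {node: [[neighbour, weight], ...]} shape, of deriving the reverse graph automatically rather than by hand:

def reverse_graph(graph):
    # invert every directed edge u -> v into v -> u, keeping the weight
    rev = {node: [] for node in graph}
    for node, edges in graph.items():
        for neighbour, weight in edges:
            rev.setdefault(neighbour, []).append([node, weight])
    return rev

forward = {'B': [['C', 1]], 'C': [['D', 1]], 'D': []}
print(reverse_graph(forward))  # {'B': [], 'C': [['B', 1]], 'D': [['C', 1]]}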
711
'''simple docstring''' from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
44
0
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class _lowercase : _UpperCAmelCase = 42 _UpperCAmelCase = 42 class _lowercase : def __init__( self , A__ ) -> Union[str, Any]: snake_case = [[] for _ in range(A__ )] snake_case = size def __getitem__( self , A__ ) -> Iterator[Edge]: return iter(self._graph[vertex] ) @property def UpperCamelCase ( self ) -> Union[str, Any]: return self._size def UpperCamelCase ( self , A__ , A__ , A__ ) -> str: if weight not in (0, 1): raise ValueError('''Edge weight must be either 0 or 1.''' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('''Vertex indexes must be in [0; size).''' ) self._graph[from_vertex].append(Edge(A__ , A__ ) ) def UpperCamelCase ( self , A__ , A__ ) -> int | None: snake_case = deque([start_vertex] ) snake_case = [None] * self.size snake_case = 0 while queue: snake_case = queue.popleft() snake_case = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: snake_case = current_distance + edge.weight snake_case = distances[edge.destination_vertex] if ( isinstance(A__ , A__ ) and new_distance >= dest_vertex_distance ): continue snake_case = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('''No path from start_vertex to finish_vertex.''' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
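The class above implements 0-1 BFS: zero-weight edges push onto the front of the deque and unit-weight edges onto the back, which yields shortest distances in O(V + E) with no priority queue. A function-style sketch of the same idea over a plain adjacency dict (assumed shape {u: [(v, w), ...]} with w in {0, 1}):

from collections import deque

def zero_one_bfs(graph, start):
    dist = {start: 0}
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in graph.get(u, []):
            new_dist = dist[u] + w
            if new_dist < dist.get(v, float("inf")):
                dist[v] = new_dist
                # 0-weight edges jump the queue, 1-weight edges wait their turn
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist

print(zero_one_bfs({0: [(1, 0), (2, 1)], 1: [(2, 0)]}, 0))  # {0: 0, 1: 0, 2: 0}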
712
'''simple docstring''' from ...processing_utils import ProcessorMixin class _lowercase ( __a ): _UpperCAmelCase = '''WhisperFeatureExtractor''' _UpperCAmelCase = '''WhisperTokenizer''' def __init__( self , A__ , A__ ) -> Optional[Any]: super().__init__(A__ , A__ ) snake_case = self.feature_extractor snake_case = False def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]: return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ ) def __call__( self , *A__ , **A__ ) -> Dict: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A__ , **A__ ) snake_case = kwargs.pop('''audio''' , A__ ) snake_case = kwargs.pop('''sampling_rate''' , A__ ) snake_case = kwargs.pop('''text''' , A__ ) if len(A__ ) > 0: snake_case = args[0] snake_case = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ ) if text is not None: snake_case = self.tokenizer(A__ , **A__ ) if text is None: return inputs elif audio is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]: return self.tokenizer.batch_decode(*A__ , **A__ ) def UpperCamelCase ( self , *A__ , **A__ ) -> str: return self.tokenizer.decode(*A__ , **A__ ) def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]: return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
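A hedged usage sketch for the processor cell above: the checkpoint id and the silent waveform are illustrative, but the call pattern (audio and sampling_rate routed through to the feature extractor) follows the __call__ defined there:

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz, purely illustrative
inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_features.shape)             # (batch, mel bins, frames) log-mel features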
44
0
'''simple docstring''' from math import sqrt def __UpperCamelCase ( a : int ) ->bool: assert isinstance(a , a ) and ( number >= 0 ), "'number' must been an int and positive" snake_case = True # 0 and 1 are none primes. if number <= 1: snake_case = False for divisor in range(2 , int(round(sqrt(a ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: snake_case = False break # precondition assert isinstance(a , a ), "'status' must been from type bool" return status def __UpperCamelCase ( a : str ) ->Any: assert isinstance(a , a ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N snake_case = list(range(2 , n + 1 ) ) snake_case = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(a ) ): for j in range(i + 1 , len(a ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): snake_case = 0 # filters actual prime numbers. snake_case = [x for x in begin_list if x != 0] # precondition assert isinstance(a , a ), "'ans' must been from type list" return ans def __UpperCamelCase ( a : Tuple ) ->Union[str, Any]: assert isinstance(a , a ) and (n > 2), "'N' must been an int and > 2" snake_case = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(a ): ans.append(a ) # precondition assert isinstance(a , a ), "'ans' must been from type list" return ans def __UpperCamelCase ( a : Any ) ->int: assert isinstance(a , a ) and number >= 0, "'number' must been an int and >= 0" snake_case = [] # this list will be returns of the function. # potential prime number factors. snake_case = 2 snake_case = number if number == 0 or number == 1: ans.append(a ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(a ): while quotient != 1: if is_prime(a ) and (quotient % factor == 0): ans.append(a ) quotient /= factor else: factor += 1 else: ans.append(a ) # precondition assert isinstance(a , a ), "'ans' must been from type list" return ans def __UpperCamelCase ( a : Optional[Any] ) ->Tuple: assert isinstance(a , a ) and ( number >= 0 ), "'number' bust been an int and >= 0" snake_case = 0 # prime factorization of 'number' snake_case = prime_factorization(a ) snake_case = max(a ) # precondition assert isinstance(a , a ), "'ans' must been from type int" return ans def __UpperCamelCase ( a : Any ) ->Tuple: assert isinstance(a , a ) and ( number >= 0 ), "'number' bust been an int and >= 0" snake_case = 0 # prime factorization of 'number' snake_case = prime_factorization(a ) snake_case = min(a ) # precondition assert isinstance(a , a ), "'ans' must been from type int" return ans def __UpperCamelCase ( a : Union[str, Any] ) ->Optional[Any]: assert isinstance(a , a ), "'number' must been an int" assert isinstance(number % 2 == 0 , a ), "compare bust been from type bool" return number % 2 == 0 def __UpperCamelCase ( a : List[str] ) ->Optional[Any]: assert isinstance(a , a ), "'number' must been an int" assert isinstance(number % 2 != 0 , a ), "compare bust been from type bool" return number % 2 != 0 def __UpperCamelCase ( a : List[Any] ) ->Any: assert ( isinstance(a , a ) and (number > 2) and is_even(a ) ), "'number' must been an int, even and > 2" snake_case = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' snake_case = get_prime_numbers(a ) snake_case = len(a ) # run variable for while-loops. 
snake_case = 0 snake_case = None # exit variable. for break up the loops snake_case = True while i < len_pn and loop: snake_case = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: snake_case = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(a , a ) and (len(a ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __UpperCamelCase ( a : Optional[int] , a : Tuple ) ->str: assert ( isinstance(a , a ) and isinstance(a , a ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." snake_case = 0 while numbera != 0: snake_case = numbera % numbera snake_case = numbera snake_case = rest # precondition assert isinstance(a , a ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __UpperCamelCase ( a : Any , a : str ) ->str: assert ( isinstance(a , a ) and isinstance(a , a ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." snake_case = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' snake_case = prime_factorization(a ) snake_case = prime_factorization(a ) elif numbera == 1 or numbera == 1: snake_case = [] snake_case = [] snake_case = max(a , a ) snake_case = 0 snake_case = 0 snake_case = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: snake_case = prime_fac_a.count(a ) snake_case = prime_fac_a.count(a ) for _ in range(max(a , a ) ): ans *= n else: snake_case = prime_fac_a.count(a ) for _ in range(a ): ans *= n done.append(a ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: snake_case = prime_fac_a.count(a ) for _ in range(a ): ans *= n done.append(a ) # precondition assert isinstance(a , a ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __UpperCamelCase ( a : List[str] ) ->Optional[Any]: assert isinstance(a , a ) and (n >= 0), "'number' must been a positive int" snake_case = 0 snake_case = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(a ): ans += 1 # precondition assert isinstance(a , a ) and is_prime( a ), "'ans' must been a prime number and from type int" return ans def __UpperCamelCase ( a : Union[str, Any] , a : int ) ->Union[str, Any]: assert ( is_prime(a ) and is_prime(a ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" snake_case = p_number_a + 1 # jump to the next number snake_case = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(a ): number += 1 while number < p_number_a: ans.append(a ) number += 1 # fetch the next prime number. while not is_prime(a ): number += 1 # precondition assert ( isinstance(a , a ) and ans[0] != p_number_a and ans[len(a ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __UpperCamelCase ( a : Any ) ->Dict: assert isinstance(a , a ) and (n >= 1), "'n' must been int and >= 1" snake_case = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(a ) # precondition assert ans[0] == 1 and ans[len(a ) - 1] == n, "Error in function getDivisiors(...)" return ans def __UpperCamelCase ( a : Union[str, Any] ) ->Optional[int]: assert isinstance(a , a ) and ( number > 1 ), "'number' must been an int and >= 1" snake_case = get_divisors(a ) # precondition assert ( isinstance(a , a ) and (divisors[0] == 1) and (divisors[len(a ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def __UpperCamelCase ( a : List[str] , a : int ) ->List[str]: assert ( isinstance(a , a ) and isinstance(a , a ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. snake_case = gcd(abs(a ) , abs(a ) ) # precondition assert ( isinstance(a , a ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __UpperCamelCase ( a : Any ) ->Any: assert isinstance(a , a ) and (n >= 0), "'n' must been a int and >= 0" snake_case = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def __UpperCamelCase ( a : Tuple ) ->Tuple: assert isinstance(a , a ) and (n >= 0), "'n' must been an int and >= 0" snake_case = 0 snake_case = 1 snake_case = 1 # this will be return for _ in range(n - 1 ): snake_case = ans ans += fiba snake_case = tmp return ans
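The gcd helper in the cell above is the plain Euclidean remainder loop; a deobfuscated worked example of the same algorithm:

def euclid_gcd(a, b):
    # repeatedly replace (a, b) with (b, a mod b) until the remainder vanishes
    while b:
        a, b = b, a % b
    return a

assert euclid_gcd(48, 36) == 12   # 48 = 1*36 + 12, then 36 = 3*12 + 0
assert euclid_gcd(17, 5) == 1     # coprime inputs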
713
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _lowercase ( __a ): _UpperCAmelCase = '''char''' _UpperCAmelCase = '''bpe''' _UpperCAmelCase = '''wp''' _lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _lowercase ( __a ): _UpperCAmelCase = ['''image_processor''', '''char_tokenizer'''] _UpperCAmelCase = '''ViTImageProcessor''' _UpperCAmelCase = '''MgpstrTokenizer''' def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]: snake_case = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A__ , ) snake_case = kwargs.pop('''feature_extractor''' ) snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) snake_case = tokenizer snake_case = AutoTokenizer.from_pretrained('''gpt2''' ) snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(A__ , A__ ) def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]: if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ ) if text is not None: snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ ) if text is None: return inputs elif images is None: return encodings else: snake_case = encodings['''input_ids'''] return inputs def UpperCamelCase ( self , A__ ) -> Dict: snake_case , snake_case , snake_case = sequences snake_case = char_preds.size(0 ) snake_case , snake_case = self._decode_helper(A__ , '''char''' ) snake_case , snake_case = self._decode_helper(A__ , '''bpe''' ) snake_case , snake_case = self._decode_helper(A__ , '''wp''' ) snake_case = [] snake_case = [] for i in range(A__ ): snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]] snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]] snake_case = scores.index(max(A__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) snake_case = {} snake_case = final_strs snake_case = final_scores snake_case = char_strs snake_case = bpe_strs snake_case = wp_strs return out def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]: if format == DecodeType.CHARACTER: snake_case = self.char_decode snake_case = 1 snake_case = '''[s]''' elif format == DecodeType.BPE: snake_case = self.bpe_decode snake_case = 2 snake_case = '''#''' elif format == DecodeType.WORDPIECE: snake_case = self.wp_decode snake_case = 1_02 snake_case = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) snake_case , snake_case = [], [] snake_case = pred_logits.size(0 ) snake_case = pred_logits.size(1 ) snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ ) snake_case = preds_index.view(-1 , A__ )[:, 1:] snake_case = decoder(A__ ) snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 ) snake_case = preds_max_prob[:, 1:] for index in range(A__ ): snake_case = preds_str[index].find(A__ ) snake_case = preds_str[index][:pred_eos] 
snake_case = preds_index[index].cpu().tolist() snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1 snake_case = preds_max_prob[index][: pred_eos_index + 1] snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(A__ ) conf_scores.append(A__ ) return dec_strs, conf_scores def UpperCamelCase ( self , A__ ) -> int: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )] return decode_strs def UpperCamelCase ( self , A__ ) -> List[str]: return self.bpe_tokenizer.batch_decode(A__ ) def UpperCamelCase ( self , A__ ) -> Union[str, Any]: snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )] return decode_strs
44
0
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _lowercase ( __a ): def __init__( self , A__ , A__ , A__=10_24 , A__=10_24 , A__=3.6 ) -> Dict: snake_case = tokenizer snake_case = tokenizer.bos_token_id snake_case = dataset snake_case = seq_length snake_case = seq_length * chars_per_token * num_of_sequences def __iter__( self ) -> Optional[int]: snake_case = iter(self.dataset ) snake_case = True while more_examples: snake_case , snake_case = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(A__ )['''content'''] ) buffer_len += len(buffer[-1] ) except StopIteration: snake_case = False break snake_case = tokenizer(A__ , truncation=A__ )['''input_ids'''] snake_case = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(A__ ) , self.seq_length ): snake_case = all_token_ids[i : i + self.seq_length] if len(A__ ) == self.seq_length: yield torch.tensor(A__ ) def __UpperCamelCase ( a : Optional[int] ) ->str: snake_case = {'''streaming''': True} snake_case = load_dataset(args.dataset_name , split='''train''' , **a ) snake_case = ConstantLengthDataset(a , a , seq_length=args.seq_length ) snake_case = DataLoader(a , batch_size=args.batch_size ) return eval_dataloader def __UpperCamelCase ( a : List[Any] ) ->Dict: model.eval() snake_case = [] for step, batch in enumerate(a ): with torch.no_grad(): snake_case = model(a , labels=a ) snake_case = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(a ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break snake_case = torch.mean(torch.cat(a ) ) try: snake_case = torch.exp(a ) except OverflowError: snake_case = float('''inf''' ) return loss.item(), perplexity.item() # Setup Accelerator _lowercase = Accelerator() # Parse configuration _lowercase = HfArgumentParser(EvaluationArguments) _lowercase = parser.parse_args() set_seed(args.seed) # Logging _lowercase = logging.getLogger(__name__) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) # Load model and tokenizer _lowercase = AutoModelForCausalLM.from_pretrained(args.model_ckpt) _lowercase = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader _lowercase = create_dataloader(args) # Prepare everything with our `accelerator`. _lowercase , _lowercase = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('Evaluating and saving model after training') _lowercase , _lowercase = evaluate(args) logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
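The evaluate loop above averages the gathered per-batch losses and exponentiates, i.e. perplexity = exp(mean cross-entropy), with the OverflowError branch catching divergent losses. A one-line sanity check of that relationship (the loss values are hypothetical):

import math

losses = [2.1, 1.9, 2.0]                 # hypothetical per-batch eval losses
mean_loss = sum(losses) / len(losses)    # 2.0
print(math.exp(mean_loss))               # ~7.389, the reported perplexity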
714
'''simple docstring''' import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowercase , _lowercase , _lowercase = False, False, False @dataclass class _lowercase : _UpperCAmelCase = None _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = "dict" _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a ) def __call__( self ) -> Optional[Any]: return self.pa_type def UpperCamelCase ( self , A__ ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(A__ , A__ ): return {"bytes": None, "path": value} elif isinstance(A__ , A__ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes snake_case = BytesIO() sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 snake_case = BytesIO(bytes() ) sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def UpperCamelCase ( self , A__ , A__ = None ) -> dict: if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.''' ) snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: snake_case = token_per_repo_id or {} snake_case = path.split('''::''' )[-1] try: snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id'''] snake_case = token_per_repo_id[repo_id] except (ValueError, KeyError): snake_case = None with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f: snake_case , snake_case = sf.read(A__ ) else: snake_case , snake_case = sf.read(A__ ) snake_case = array.T if self.mono: snake_case = librosa.to_mono(A__ ) if self.sampling_rate and self.sampling_rate != sampling_rate: snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate ) snake_case = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def UpperCamelCase ( self , A__ ) -> pa.StructArray: if pa.types.is_string(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: snake_case = storage.field('''bytes''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: snake_case = storage.field('''path''' ) else: snake_case = pa.array([None] * len(A__ ) , type=pa.string() ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(A__ , self.pa_type ) def UpperCamelCase ( self , A__ ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A__ ): with xopen(A__ , '''rb''' ) as f: snake_case = f.read() return bytes_ snake_case = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else 
x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) snake_case = pa.array( [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(A__ , self.pa_type )
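A hedged usage sketch for the Audio feature above: the wav path is hypothetical, but cast_column with Audio(sampling_rate=...) is the standard datasets entry point into the encode/decode paths this cell defines:

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})      # hypothetical local file
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
# ds[0]["audio"] would then decode to {"path": ..., "array": ..., "sampling_rate": 16000}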
44
0
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowercase ( __a , unittest.TestCase ): _UpperCAmelCase = GPTSanJapaneseTokenizer _UpperCAmelCase = False _UpperCAmelCase = {'''do_clean_text''': False, '''add_prefix_space''': False} def UpperCamelCase ( self ) -> int: '''simple docstring''' super().setUp() # fmt: off snake_case = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>'''] # fmt: on snake_case = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀 snake_case = {'''unk_token''': '''<unk>'''} snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) with open(self.emoji_file , '''w''' ) as emoji_writer: emoji_writer.write(json.dumps(A__ ) ) def UpperCamelCase ( self , **A__ ) -> Union[str, Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **A__ ) def UpperCamelCase ( self , A__ ) -> Any: '''simple docstring''' snake_case = '''こんにちは、世界。 \nこんばんは、㔺界。😀''' snake_case = '''こんにちは、世界。 \nこんばんは、世界。😀''' return input_text, output_text def UpperCamelCase ( self , A__ ) -> str: '''simple docstring''' snake_case , snake_case = self.get_input_output_texts(A__ ) snake_case = tokenizer.encode(A__ , add_special_tokens=A__ ) snake_case = tokenizer.decode(A__ , clean_up_tokenization_spaces=A__ ) return text, ids def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' pass # TODO add if relevant def UpperCamelCase ( self ) -> int: '''simple docstring''' pass # TODO add if relevant def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass # TODO add if relevant def UpperCamelCase ( self ) -> Dict: '''simple docstring''' snake_case = self.get_tokenizer() # Testing tokenization snake_case = '''こんにちは、世界。 こんばんは、㔺界。''' snake_case = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。'''] snake_case = tokenizer.tokenize(A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids without special tokens snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] snake_case = tokenizer.convert_tokens_to_ids(A__ ) self.assertListEqual(A__ , A__ ) # Testing conversion to ids with special tokens snake_case = tokens + [tokenizer.unk_token] snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] snake_case = tokenizer.convert_tokens_to_ids(A__ ) self.assertListEqual(A__ , A__ ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' snake_case = self.get_tokenizer() # Testing tokenization snake_case = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。''' snake_case = '''こんにちは、、、、世界。こんばんは、、、、世界。''' snake_case = tokenizer.encode(A__ ) snake_case = tokenizer.decode(A__ ) self.assertEqual(A__ , A__ ) @slow def UpperCamelCase ( 
self ) -> List[Any]: '''simple docstring''' snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) # Testing tokenization snake_case = '''こんにちは、世界。''' snake_case = '''こんばんは、㔺界。😀''' snake_case = '''こんにちは、世界。こんばんは、世界。😀''' snake_case = tokenizer.encode(prefix_text + input_text ) snake_case = tokenizer.encode('''''' , prefix_text=prefix_text + input_text ) snake_case = tokenizer.encode(A__ , prefix_text=A__ ) snake_case = tokenizer.decode(A__ ) snake_case = tokenizer.decode(A__ ) snake_case = tokenizer.decode(A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) self.assertEqual(A__ , A__ ) @slow def UpperCamelCase ( self ) -> int: '''simple docstring''' snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) # Testing tokenization snake_case = '''こんにちは、世界。''' snake_case = '''こんばんは、㔺界。😀''' snake_case = len(tokenizer.encode(A__ ) ) - 2 snake_case = len(tokenizer.encode(A__ ) ) - 2 snake_case = [1] + [0] * (len_prefix + len_text + 1) snake_case = [1] * (len_prefix + len_text + 1) + [0] snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1) snake_case = tokenizer(prefix_text + input_text ).token_type_ids snake_case = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids snake_case = tokenizer(A__ , prefix_text=A__ ).token_type_ids self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ ) self.assertListEqual(A__ , A__ ) @slow def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) snake_case = tokenizer.encode('''あンいワ''' ) snake_case = tokenizer.encode('''''' , prefix_text='''あンいワ''' ) snake_case = tokenizer.encode('''いワ''' , prefix_text='''あン''' ) self.assertEqual(tokenizer.decode(A__ ) , tokenizer.decode(A__ ) ) self.assertEqual(tokenizer.decode(A__ ) , tokenizer.decode(A__ ) ) self.assertNotEqual(A__ , A__ ) self.assertNotEqual(A__ , A__ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) snake_case = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']] snake_case = tokenizer(A__ , padding=A__ ) snake_case = tokenizer.batch_encode_plus(A__ , padding=A__ ) # fmt: off snake_case = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , A__ ) self.assertListEqual(x_token.token_type_ids , A__ ) self.assertListEqual(x_token.attention_mask , A__ ) self.assertListEqual(x_token_a.input_ids , A__ ) self.assertListEqual(x_token_a.token_type_ids , A__ ) self.assertListEqual(x_token_a.attention_mask , A__ ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' pass def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' pass
715
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _lowercase : @staticmethod def UpperCamelCase ( *A__ , **A__ ) -> List[Any]: pass def __UpperCamelCase ( a : Image ) ->str: snake_case = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _lowercase ( unittest.TestCase ): _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]: snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCamelCase ( self , A__ , A__ ) -> List[Any]: snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ ) import datasets snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) snake_case = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , A__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def UpperCamelCase ( self ) -> Optional[Any]: pass @slow @require_torch def UpperCamelCase ( self ) -> Dict: snake_case = '''Intel/dpt-large''' snake_case = pipeline('''depth-estimation''' , model=A__ ) snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) snake_case = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 ) @require_torch def UpperCamelCase ( self ) -> Any: # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
44
0
'''simple docstring''' import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _lowercase ( __a , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class _lowercase ( unittest.TestCase ): @property def UpperCamelCase ( self ) -> List[Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase ( self ) -> Optional[int]: snake_case = ort.SessionOptions() snake_case = False return options def UpperCamelCase ( self ) -> List[str]: snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) snake_case = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=A__ ) snake_case = '''A red cat sitting on a park bench''' snake_case = np.random.RandomState(0 ) snake_case = pipe( prompt=A__ , image=A__ , mask_image=A__ , guidance_scale=7.5 , num_inference_steps=10 , generator=A__ , output_type='''np''' , ) snake_case = output.images snake_case = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) snake_case = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self ) -> int: snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) snake_case = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) snake_case = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=A__ , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=A__ ) snake_case = '''A red cat sitting on a park bench''' snake_case = np.random.RandomState(0 ) snake_case = pipe( prompt=A__ , image=A__ , mask_image=A__ , guidance_scale=7.5 , num_inference_steps=20 , generator=A__ , output_type='''np''' , ) snake_case = output.images snake_case = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) snake_case = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
716
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __UpperCamelCase ( a : Optional[int] ) ->Dict: snake_case = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Optional[Any] ) ->int: snake_case = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: snake_case = s_dict.pop(a ) elif "subsample" in key: snake_case = s_dict.pop(a ) def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]: snake_case , snake_case = emb.weight.shape snake_case = nn.Linear(a , a , bias=a ) snake_case = emb.weight.data return lin_layer def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple: snake_case = torch.load(a , map_location='''cpu''' ) snake_case = mam_aaa['''args'''] snake_case = mam_aaa['''model'''] snake_case = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(a ) rename_keys(a ) snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0] snake_case = args.share_decoder_input_output_embed snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )] snake_case = SpeechaTextConfig( vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , ) snake_case = SpeechaTextForConditionalGeneration(a ) snake_case , snake_case = model.model.load_state_dict(a , strict=a ) if len(a ) > 0 and not set(a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f""" but all the following weights are missing {missing}""" ) if tie_embeds: snake_case = make_linear_from_emb(model.model.decoder.embed_tokens ) else: snake_case = lm_head_weights model.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _lowercase = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
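The converter above ties the LM head to the decoder embedding through a small helper; a deobfuscated sketch of that weight-tying step (the dimension order here is my choice, picked so the resulting Linear maps hidden states to vocab logits):

import torch
from torch import nn

def tie_lm_head_to_embedding(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lm_head = nn.Linear(emb_size, vocab_size, bias=False)   # weight shape: (vocab, emb)
    lm_head.weight.data = emb.weight.data                   # share the embedding matrix
    return lm_head

emb = nn.Embedding(100, 16)
lm_head = tie_lm_head_to_embedding(emb)
assert torch.equal(lm_head.weight, emb.weight)
print(lm_head(torch.zeros(1, 16)).shape)                    # torch.Size([1, 100])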
44
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { 'tanreinama/GPTSAN-2.8B-spout_is_uniform': ( 'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json' ), } class _lowercase ( __a ): _UpperCAmelCase = '''gptsan-japanese''' _UpperCAmelCase = [ '''past_key_values''', ] _UpperCAmelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , A__=3_60_00 , A__=12_80 , A__=10_24 , A__=81_92 , A__=40_96 , A__=1_28 , A__=10 , A__=0 , A__=16 , A__=16 , A__=1_28 , A__=0.0 , A__=1e-5 , A__=False , A__=0.0 , A__="float32" , A__=False , A__=False , A__=False , A__=0.0_0_2 , A__=False , A__=True , A__=3_59_98 , A__=3_59_95 , A__=3_59_99 , **A__ , ) -> List[Any]: snake_case = vocab_size snake_case = max_position_embeddings snake_case = d_model snake_case = d_ff snake_case = d_ext snake_case = d_spout snake_case = num_switch_layers snake_case = num_ext_layers snake_case = num_switch_layers + num_ext_layers snake_case = num_heads snake_case = num_experts snake_case = expert_capacity snake_case = dropout_rate snake_case = layer_norm_epsilon snake_case = router_bias snake_case = router_jitter_noise snake_case = router_dtype snake_case = router_ignore_padding_tokens snake_case = output_hidden_states snake_case = output_attentions snake_case = initializer_factor snake_case = output_router_logits snake_case = use_cache super().__init__( separator_token_id=A__ , pad_token_id=A__ , eos_token_id=A__ , **A__ , )
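A hedged instantiation sketch for the config above; unspecified fields fall back to the defaults in __init__, and num_layers is derived as the sum of switch and extension layers:

from transformers import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig(num_switch_layers=4, num_ext_layers=2)
print(config.num_layers)   # 6 — switch plus extension layers, as computed in __init__
print(config.model_type)   # "gptsan-japanese"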
717
'''simple docstring''' from ..utils import DummyObject, requires_backends class _lowercase ( metaclass=__a ): _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq'''] def __init__( self , *A__ , **A__ ) -> Union[str, Any]: requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase ( cls , *A__ , **A__ ) -> Optional[Any]: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def UpperCamelCase ( cls , *A__ , **A__ ) -> Any: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
44
0
'''simple docstring''' import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def __UpperCamelCase ( a : str , a : Optional[Any]=False ) ->Any: try: snake_case = os.environ[key] except KeyError: # KEY isn't set, default to `default`. snake_case = default else: # KEY is set, convert it to True or False. try: snake_case = strtobool(a ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value _lowercase = parse_flag_from_env('RUN_SLOW', default=False) def __UpperCamelCase ( a : Dict ) ->List[str]: return unittest.skip('''Test was skipped''' )(a ) def __UpperCamelCase ( a : List[str] ) ->str: return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(a ) def __UpperCamelCase ( a : List[Any] ) ->Dict: return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(a ) def __UpperCamelCase ( a : Optional[Any] ) ->Dict: return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(a ) def __UpperCamelCase ( a : Any ) ->Optional[int]: return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(a ) def __UpperCamelCase ( a : Union[str, Any] ) ->int: return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(a ) def __UpperCamelCase ( a : Optional[Any] ) ->Optional[Any]: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(a ) def __UpperCamelCase ( a : Dict ) ->List[str]: return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(a ) def __UpperCamelCase ( a : str ) ->List[Any]: return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(a ) def __UpperCamelCase ( a : int ) ->Any: return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(a ) def __UpperCamelCase ( a : Optional[Any] ) ->List[str]: return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(a ) def __UpperCamelCase ( a : Any ) ->Dict: return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(a ) def __UpperCamelCase ( a : Dict ) ->List[str]: return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(a ) def __UpperCamelCase ( a : Tuple ) ->int: return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(a ) def __UpperCamelCase ( a : Optional[Any] ) ->Optional[int]: return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(a ) def __UpperCamelCase ( a : List[str] ) ->Optional[Any]: return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(a ) def __UpperCamelCase ( a : int=None , a : Tuple=None ) ->Optional[Any]: if test_case is None: return partial(a , version=a ) return 
unittest.skipUnless(is_torch_version('''>=''' , a ) , f"""test requires torch version >= {version}""" )(a ) def __UpperCamelCase ( a : Tuple ) ->Tuple: return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(a ) def __UpperCamelCase ( a : Dict ) ->int: return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(a ) def __UpperCamelCase ( a : Optional[Any] ) ->List[str]: return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(a ) _lowercase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def __UpperCamelCase ( a : str ) ->Dict: return unittest.skipUnless( _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(a ) class _lowercase ( unittest.TestCase ): _UpperCAmelCase = True @classmethod def UpperCamelCase ( cls ) -> List[Any]: snake_case = tempfile.mkdtemp() @classmethod def UpperCamelCase ( cls ) -> int: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def UpperCamelCase ( self ) -> List[Any]: if self.clear_on_setup: for path in Path(self.tmpdir ).glob('''**/*''' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A__ ) class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self ) -> int: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class _lowercase ( unittest.TestCase ): def UpperCamelCase ( self , A__ ) -> Optional[Any]: snake_case = mocks if isinstance(A__ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def __UpperCamelCase ( a : List[Any] ) ->Tuple: snake_case = AcceleratorState() snake_case = tensor[None].clone().to(state.device ) snake_case = gather(a ).cpu() snake_case = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , a ): return False return True class _lowercase : def __init__( self , A__ , A__ , A__ ) -> int: snake_case = returncode snake_case = stdout snake_case = stderr async def __UpperCamelCase ( a : Tuple , a : int ) ->List[str]: while True: snake_case = await stream.readline() if line: callback(a ) else: break async def __UpperCamelCase ( a : int , a : Optional[Any]=None , a : List[Any]=None , a : Union[str, Any]=None , a : Dict=False , a : Optional[Any]=False ) ->_RunOutput: if echo: print('''\nRunning: ''' , ''' '''.join(a ) ) snake_case = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=a , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=a , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) snake_case = [] snake_case = [] def tee(a : Any , a : Tuple , a : List[Any] , a : List[Any]="" ): snake_case = line.decode('''utf-8''' ).rstrip() sink.append(a ) if not quiet: print(a , a , file=a ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda a : tee(a , a , sys.stdout , label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda a : tee(a , a , sys.stderr , label='''stderr:''' ) ) ), ] , timeout=a , ) return _RunOutput(await p.wait() , a , a ) def __UpperCamelCase ( a : Optional[int] , a : Dict=None , a : List[Any]=None , a : Tuple=180 , a : Dict=False , a : Optional[int]=True ) ->_RunOutput: snake_case = asyncio.get_event_loop() snake_case = loop.run_until_complete( _stream_subprocess(a , env=a , stdin=a , timeout=a , quiet=a , echo=a ) ) snake_case = ''' '''.join(a ) if result.returncode > 0: snake_case = '''\n'''.join(result.stderr ) raise RuntimeError( f"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) return result class _lowercase ( __a ): pass def __UpperCamelCase ( a : List[str] , a : Union[str, Any]=False ) ->Union[str, Any]: try: snake_case = subprocess.check_output(a , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(a , '''decode''' ): snake_case = output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f"""Command `{' '.join(a )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
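The env-flag parser at the top of the cell above is worth isolating; a self-contained sketch of the same RUN_SLOW-style gating:

import os
from distutils.util import strtobool

def parse_flag_from_env(key, default=False):
    value = os.environ.get(key)
    if value is None:
        return default                 # key unset: fall back to the default
    try:
        return bool(strtobool(value))  # accepts yes/no, true/false, 1/0, ...
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")

os.environ["RUN_SLOW"] = "yes"
print(parse_flag_from_env("RUN_SLOW"))  # True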
718
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator class _lowercase : def __init__( self , A__ ) -> None: snake_case = value snake_case = None snake_case = None class _lowercase : def __init__( self , A__ ) -> None: snake_case = tree def UpperCamelCase ( self , A__ ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
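The iterator class above reports a single value: the recursive sum of every node in the tree. A deobfuscated standalone sketch of that traversal:

class Node:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

def tree_sum(node):
    # value of this node plus the sums of both subtrees
    if node is None:
        return 0
    return node.value + tree_sum(node.left) + tree_sum(node.right)

root = Node(10, Node(5), Node(-3))
assert tree_sum(root) == 12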
44
0
'''simple docstring'''
from scipy.stats import spearmanr

import datasets


_lowercase = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'

_lowercase = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'

_lowercase = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    def UpperCamelCase ( self ) -> Any:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                } ) ,
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,
        )

    def UpperCamelCase ( self , A__ , A__ , A__=False ) -> str:
        snake_case = spearmanr(A__ , A__ )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
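# Editor's check of the docstring example above, calling scipy directly; the
# metric is a thin wrapper around scipy.stats.spearmanr.
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(rho)               # -0.7, matching the example output above
print(round(pvalue, 2))  # 0.19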
719
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)

_lowercase = [
    ('bert.bert', 'visual_bert'),
    ('bert.cls', 'cls'),
    ('bert.classifier', 'cls'),
    ('token_type_embeddings_visual', 'visual_token_type_embeddings'),
    ('position_embeddings_visual', 'visual_position_embeddings'),
    ('projection', 'visual_projection'),
]

_lowercase = [
    'nlvr2_coco_pre_trained.th',
    'nlvr2_fine_tuned.th',
    'nlvr2_pre_trained.th',
    'vcr_coco_pre_train.th',
    'vcr_fine_tune.th',
    'vcr_pre_train.th',
    'vqa_coco_pre_trained.th',
    'vqa_fine_tuned.th',
    'vqa_pre_trained.th',
]


def __UpperCamelCase ( a : List[str] ) ->Optional[int]:
    snake_case = torch.load(a , map_location='''cpu''' )
    return sd


def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] , a : int=rename_keys_prefix ) ->Tuple:
    snake_case = OrderedDict()
    snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) )

    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        snake_case = key
        for name_pair in rename_keys_prefix:
            snake_case = new_key.replace(name_pair[0] , name_pair[1] )
        snake_case = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            snake_case = new_d['''cls.predictions.bias''']
    return new_d


@torch.no_grad()
def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]:
    assert (
        checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        snake_case = '''pretraining'''
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "nlvr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
            snake_case = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
            snake_case = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
            snake_case = '''vqa'''
        elif "nlvr" in checkpoint_path:
            snake_case = {
                '''visual_embedding_dim''': 1024,
                '''num_labels''': 2,
            }
            snake_case = '''nlvr'''

    snake_case = VisualBertConfig(**a )

    # Load State Dict
    snake_case = load_state_dict(a )

    snake_case = get_new_dict(a , a )

    if model_type == "pretraining":
        snake_case = VisualBertForPreTraining(a )
    elif model_type == "vqa":
        snake_case = VisualBertForQuestionAnswering(a )
    elif model_type == "nlvr":
        snake_case = VisualBertForVisualReasoning(a )
    elif model_type == "multichoice":
        snake_case = VisualBertForMultipleChoice(a )

    model.load_state_dict(a )
    # Save Checkpoints
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )


if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
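# Editor's sketch of the prefix-renaming pass in the state-dict conversion above,
# run on a toy state dict (these keys are made up for illustration).
from collections import OrderedDict

rename_pairs = [("bert.bert", "visual_bert"), ("projection", "visual_projection")]
toy_sd = OrderedDict([("bert.bert.encoder.layer.0.weight", 1), ("projection.bias", 2)])

renamed = OrderedDict()
for key, value in toy_sd.items():
    new_key = key
    for old, new in rename_pairs:
        new_key = new_key.replace(old, new)
    renamed[new_key] = value

print(list(renamed))  # ['visual_bert.encoder.layer.0.weight', 'visual_projection.bias']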
44
0
'''simple docstring'''
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class _lowercase ( __a , unittest.TestCase ):
    _UpperCAmelCase = BlenderbotSmallTokenizer
    _UpperCAmelCase = False

    def UpperCamelCase ( self ) -> int:
        super().setUp()

        snake_case = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        snake_case = dict(zip(A__ , range(len(A__ ) ) ) )

        snake_case = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        snake_case = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}

        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A__ ) )

    def UpperCamelCase ( self , **A__ ) -> Tuple:
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **A__ )

    def UpperCamelCase ( self , A__ ) -> Dict:
        snake_case = '''adapt act apte'''
        snake_case = '''adapt act apte'''
        return input_text, output_text

    def UpperCamelCase ( self ) -> Dict:
        snake_case = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case = '''adapt act apte'''
        snake_case = ['''adapt''', '''act''', '''ap@@''', '''te''']
        snake_case = tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )

        snake_case = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        snake_case = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )

    def UpperCamelCase ( self ) -> Optional[Any]:
        snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [13_84]
        snake_case = '''I am a small frog.'''
        snake_case = tok([src_text] , padding=A__ , truncation=A__ )['''input_ids''']
        snake_case = tok.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def UpperCamelCase ( self ) -> str:
        snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        snake_case = '''I am a small frog .'''
        snake_case = '''.'''
        snake_case = tok(A__ )['''input_ids''']
        snake_case = tok(A__ )['''input_ids''']

        assert encoded[-1] == encoded_dot[0]
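# Editor's sketch: with the toy vocab above, ids follow list order 0..6, so the
# bos + ["adapt", "act", "ap@@", "te"] + eos sequence maps to [0, 1, 2, 3, 4, 5],
# which is exactly what the convert_tokens_to_ids assertion checks.
toy_vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
token_to_id = {tok: i for i, tok in enumerate(toy_vocab)}

sequence = ["__start__", "adapt", "act", "ap@@", "te", "__end__"]
print([token_to_id[t] for t in sequence])  # [0, 1, 2, 3, 4, 5]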
720
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)


def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]:
    snake_case = original_name.split('''.''' )[0]
    snake_case = key.split('''.''' )
    snake_case = int(key_list[key_list.index(a ) - 2] )
    snake_case = int(key_list[key_list.index(a ) - 1] )
    snake_case = orig_block_num - offset
    snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
    return key


def __UpperCamelCase ( a : Tuple ) ->Dict:
    snake_case = OrderedDict()
    snake_case , snake_case = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            snake_case = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            snake_case = key[: key.find('''proj''' )]
            snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" )
            snake_case = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            snake_case = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            snake_case = key.replace('''head''' , '''classifier''' )
        snake_case = value
    return new_state_dict


def __UpperCamelCase ( ) ->Optional[int]:
    snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    snake_case = Image.open(requests.get(a , stream=a ).raw )
    return image


@torch.no_grad()
def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]:
    snake_case = PoolFormerConfig()

    # set attributes based on model_name
    snake_case = '''huggingface/label-files'''
    snake_case = model_name[-3:]
    snake_case = 1000
    snake_case = '''imagenet-1k-id2label.json'''
    snake_case = (1, 1000)

    # set config attributes
    snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
    snake_case = {int(a ): v for k, v in idalabel.items()}
    snake_case = idalabel
    snake_case = {v: k for k, v in idalabel.items()}
    if size == "s12":
        snake_case = [2, 2, 6, 2]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 0.9
    elif size == "s24":
        snake_case = [4, 4, 12, 4]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 0.9
    elif size == "s36":
        snake_case = [6, 6, 18, 6]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.9
    elif size == "m36":
        snake_case = [6, 6, 18, 6]
        snake_case = [96, 192, 384, 768]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.95
    elif size == "m48":
        snake_case = [8, 8, 24, 8]
        snake_case = [96, 192, 384, 768]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )

    # load image processor
    snake_case = PoolFormerImageProcessor(crop_pct=a )

    # Prepare image
    snake_case = prepare_img()
    snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values

    logger.info(f"""Converting model {model_name}...""" )

    # load original state dict
    snake_case = torch.load(a , map_location=torch.device('''cpu''' ) )

    # rename keys
    snake_case = rename_keys(a )

    # create HuggingFace model and load state dict
    snake_case = PoolFormerForImageClassification(a )
    model.load_state_dict(a )
    model.eval()

    # Define image processor
    snake_case = PoolFormerImageProcessor(crop_pct=a )
    snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values

    # forward pass
    snake_case = model(a )
    snake_case = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        snake_case = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        snake_case = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        snake_case = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , a , atol=1e-2 )

    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(a )


if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )

    _lowercase = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
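# Editor's sketch of the id2label normalization used above: hub JSON arrives with
# string keys, which are cast back to ints before building the reverse map
# (toy stand-in for imagenet-1k-id2label.json).
idalabel = {"0": "tench", "1": "goldfish"}
id2label = {int(k): v for k, v in idalabel.items()}
label2id = {v: k for k, v in id2label.items()}
print(id2label[0], label2id["goldfish"])  # tench 1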
44
0
from ...processing_utils import ProcessorMixin


class _lowercase ( __a ):
    _UpperCAmelCase = '''WhisperFeatureExtractor'''
    _UpperCAmelCase = '''WhisperTokenizer'''

    def __init__( self , A__ , A__ ) -> Optional[Any]:
        super().__init__(A__ , A__ )
        snake_case = self.feature_extractor
        snake_case = False

    def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]:
        return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ )

    def __call__( self , *A__ , **A__ ) -> Dict:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*A__ , **A__ )

        snake_case = kwargs.pop('''audio''' , A__ )
        snake_case = kwargs.pop('''sampling_rate''' , A__ )
        snake_case = kwargs.pop('''text''' , A__ )
        if len(A__ ) > 0:
            snake_case = args[0]
            snake_case = args[1:]

        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )

        if audio is not None:
            snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
        if text is not None:
            snake_case = self.tokenizer(A__ , **A__ )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            snake_case = encodings['''input_ids''']
            return inputs

    def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
        return self.tokenizer.batch_decode(*A__ , **A__ )

    def UpperCamelCase ( self , *A__ , **A__ ) -> str:
        return self.tokenizer.decode(*A__ , **A__ )

    def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]:
        return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
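# Editor's offline sketch (toy stand-ins, no models downloaded) of the dispatch in
# __call__ above: audio-only returns features, text-only returns token ids, and
# with both inputs the token ids are attached to the feature dict as labels.
def _toy_process(audio=None, text=None):
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = {"input_features": [[0.0]]} if audio is not None else None
    encodings = {"input_ids": [1, 2, 3]} if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]
    return inputs


print(_toy_process(audio=b"\x00", text="hi"))  # {'input_features': [[0.0]], 'labels': [1, 2, 3]}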
721
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


_lowercase = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax


logging.basicConfig(level=logging.DEBUG)

_lowercase = logging.getLogger()


def __UpperCamelCase ( ) ->Tuple:
    snake_case = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    snake_case = parser.parse_args()
    return args.f


def __UpperCamelCase ( a : Dict , a : Tuple="eval" ) ->List[Any]:
    snake_case = os.path.join(a , f"""{split}_results.json""" )
    if os.path.exists(a ):
        with open(a , '''r''' ) as f:
            return json.load(a )
    raise ValueError(f"""can't find {path}""" )


_lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class _lowercase ( __a ):
    def UpperCamelCase ( self ) -> List[str]:
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(A__ , '''argv''' , A__ ):
            run_flax_glue.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )

    @slow
    def UpperCamelCase ( self ) -> List[Any]:
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(A__ , '''argv''' , A__ ):
            run_clm_flax.main()
            snake_case = get_results(A__ )
            self.assertLess(result['''eval_perplexity'''] , 1_00 )

    @slow
    def UpperCamelCase ( self ) -> int:
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(A__ , '''argv''' , A__ ):
            run_summarization_flax.main()
            snake_case = get_results(A__ , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )

    @slow
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(A__ , '''argv''' , A__ ):
            run_mlm_flax.main()
            snake_case = get_results(A__ )
            self.assertLess(result['''eval_perplexity'''] , 42 )

    @slow
    def UpperCamelCase ( self ) -> Dict:
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(A__ , '''argv''' , A__ ):
            run_ta_mlm_flax.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )

    @slow
    def UpperCamelCase ( self ) -> int:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        snake_case = 7 if get_gpu_count() > 1 else 2

        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(A__ , '''argv''' , A__ ):
            run_flax_ner.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )

    @slow
    def UpperCamelCase ( self ) -> Any:
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(A__ , '''argv''' , A__ ):
            run_qa.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
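# Editor's minimal sketch of the argv-patching trick every test above relies on:
# each example script reads sys.argv through argparse, so swapping argv drives
# main() without a shell (`_toy_main` is an illustrative stand-in).
import argparse
import sys
from unittest.mock import patch


def _toy_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir")
    args = parser.parse_args()
    print(args.output_dir)


testargs = "prog --output_dir /tmp/out".split()
with patch.object(sys, "argv", testargs):
    _toy_main()  # prints /tmp/out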
44
0
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def __init__( self : List[Any] , a : Dict , a : str=13 , a : List[str]=7 , a : int=True , a : Tuple=True , a : str=True , a : List[str]=True , a : str=99 , a : str=32 , a : Tuple=5 , a : Dict=4 , a : Optional[Any]=37 , a : Dict="gelu" , a : Optional[int]=0.1 , a : Any=0.1 , a : Dict=512 , a : Any=16 , a : Tuple=2 , a : Optional[Any]=0.02 , a : Any=4 , )-> str:
        """simple docstring"""
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = seq_length
        lowercase__ = is_training
        lowercase__ = use_attention_mask
        lowercase__ = use_token_type_ids
        lowercase__ = use_labels
        lowercase__ = vocab_size
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = max_position_embeddings
        lowercase__ = type_vocab_size
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = num_choices

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int:
        """simple docstring"""
        lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        lowercase__ = None
        if self.use_attention_mask:
            lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )

        lowercase__ = None
        if self.use_token_type_ids:
            lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        lowercase__ = RobertaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=a ,
            initializer_range=self.initializer_range ,
        )

        return config, input_ids, token_type_ids, attention_mask

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
        """simple docstring"""
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
        """simple docstring"""
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = True
        lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : str = True

    _UpperCamelCase : List[str] = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
        """simple docstring"""
        lowercase__ = FlaxRobertaModelTester(self )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            lowercase__ = model_class_name.from_pretrained('roberta-base' , from_pt=a )
            lowercase__ = model(np.ones((1, 1) ) )
            self.assertIsNotNone(a )
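# Editor's numpy sketch of the ids_tensor helper pattern the tester above uses:
# random token ids of a given shape, bounded by vocab_size (a stand-in, not the
# real transformers helper).
import numpy as np


def toy_ids_tensor(shape, vocab_size, seed=0):
    rng = np.random.default_rng(seed)
    return rng.integers(0, vocab_size, size=shape)


print(toy_ids_tensor((13, 7), 99).shape)  # (13, 7), i.e. batch_size x seq_length as above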
45
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Tuple = 'ClapFeatureExtractor'
    _UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__( self : List[Any] , a : int , a : str )-> Any:
        """simple docstring"""
        super().__init__(a , a )

    def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = kwargs.pop('sampling_rate' , a )

        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )

        if text is not None:
            lowercase__ = self.tokenizer(a , return_tensors=a , **a )

        if audios is not None:
            lowercase__ = self.feature_extractor(
                a , sampling_rate=a , return_tensors=a , **a )

        if text is not None and audios is not None:
            lowercase__ = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**a ) , tensor_type=a )

    def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*a , **a )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
        """simple docstring"""
        return self.tokenizer.decode(*a , **a )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
        """simple docstring"""
        lowercase__ = self.tokenizer.model_input_names
        lowercase__ = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
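# Editor's toy sketch of the merge in __call__ above: with both text and audios,
# the audio features are attached to the text encoding (plain dicts stand in for
# the real BatchEncoding/BatchFeature objects).
def _toy_combine(text_encoding=None, audio_features=None):
    if text_encoding is None and audio_features is None:
        raise ValueError("You have to specify either text or audios. Both cannot be none.")
    if text_encoding is not None and audio_features is not None:
        text_encoding["input_features"] = audio_features["input_features"]
        return text_encoding
    return text_encoding if text_encoding is not None else audio_features


print(_toy_combine({"input_ids": [0, 31414, 2]}, {"input_features": [[0.0]]}))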
45
1