code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from typing import Any class snake_case : """simple docstring""" def __init__( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = data lowerCamelCase_ = None def __repr__( self ): """simple docstring""" return f'''Node({self.data})''' class snake_case : """simple docstring""" def __init__( self ): """simple docstring""" lowerCamelCase_ = None def __iter__( self ): """simple docstring""" lowerCamelCase_ = self.head while node: yield node.data lowerCamelCase_ = node.next def __len__( self ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self ): """simple docstring""" return "->".join([str(UpperCamelCase ) for item in self] ) def __getitem__( self , UpperCamelCase ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." 
) lowerCamelCase_ = self.head for _ in range(UpperCamelCase ): lowerCamelCase_ = current.next lowerCamelCase_ = data def snake_case ( self , UpperCamelCase ): """simple docstring""" self.insert_nth(len(self ) , UpperCamelCase ) def snake_case ( self , UpperCamelCase ): """simple docstring""" self.insert_nth(0 , UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) lowerCamelCase_ = Node(UpperCamelCase ) if self.head is None: lowerCamelCase_ = new_node elif index == 0: lowerCamelCase_ = self.head # link new_node to head lowerCamelCase_ = new_node else: lowerCamelCase_ = self.head for _ in range(index - 1 ): lowerCamelCase_ = temp.next lowerCamelCase_ = temp.next lowerCamelCase_ = new_node def snake_case ( self ): # print every node data """simple docstring""" print(self ) def snake_case ( self ): """simple docstring""" return self.delete_nth(0 ) def snake_case ( self ): # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def snake_case ( self , UpperCamelCase = 0 ): """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." ) lowerCamelCase_ = self.head # default first node if index == 0: lowerCamelCase_ = self.head.next else: lowerCamelCase_ = self.head for _ in range(index - 1 ): lowerCamelCase_ = temp.next lowerCamelCase_ = temp.next lowerCamelCase_ = temp.next.next return delete_node.data def snake_case ( self ): """simple docstring""" return self.head is None def snake_case ( self ): """simple docstring""" lowerCamelCase_ = None lowerCamelCase_ = self.head while current: # Store the current node's next node. 
lowerCamelCase_ = current.next # Make the current node's next point backwards lowerCamelCase_ = prev # Make the previous node be the current node lowerCamelCase_ = current # Make the current node the next node (to progress iteration) lowerCamelCase_ = next_node # Return prev in order to put the head at the end lowerCamelCase_ = prev def __snake_case ( ): lowerCamelCase_ = LinkedList() assert linked_list.is_empty() is True assert str(UpperCAmelCase_ ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(UpperCAmelCase_ ) == i linked_list.insert_nth(UpperCAmelCase_ , i + 1 ) assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(UpperCAmelCase_ ) == 9 assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCamelCase_ = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(-8 , 1 ) ) def __snake_case ( ): lowerCamelCase_ = [ -9, 100, Node(77345112 ), "dlrow olleH", 7, 5555, 0, -192.5_5555, "Hello, world!", 77.9, Node(10 ), None, None, 12.20, ] lowerCamelCase_ = LinkedList() for i in test_input: linked_list.insert_tail(UpperCAmelCase_ ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(UpperCAmelCase_ ) == "-9->100->Node(77345112)->dlrow 
olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCamelCase_ = linked_list.delete_head() assert result == -9 assert ( str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCamelCase_ = linked_list.delete_tail() assert result == 12.2 assert ( str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCamelCase_ = linked_list.delete_nth(10 ) assert result is None assert ( str(UpperCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" ) ) assert ( str(UpperCAmelCase_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(UpperCAmelCase_ ) assert ( str(UpperCAmelCase_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(UpperCAmelCase_ ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def __snake_case ( ): from doctest import testmod testmod() lowerCamelCase_ = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) 
linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(UpperCAmelCase_ ) print("\nReading/changing Node data using indexing:" ) print(F'''Element at Position 1: {linked_list[1]}''' ) lowerCamelCase_ = input("Enter New Value: " ).strip() print("New list:" ) print(UpperCAmelCase_ ) print(F'''length of linked_list is : {len(UpperCAmelCase_ )}''' ) if __name__ == "__main__": main()
675
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ : Optional[int] = logging.get_logger(__name__) a_ : Dict = { """SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = "deformable_detr" _lowerCamelCase = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=3 , UpperCamelCase=300 , UpperCamelCase=1024 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=256 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1.0 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="sine" , UpperCamelCase="resnet50" , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=False , UpperCamelCase=300 , UpperCamelCase=False , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=0.1 , UpperCamelCase=0.25 , UpperCamelCase=False , **UpperCamelCase , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) lowerCamelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = backbone_config.get("model_type" ) lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type] lowerCamelCase_ = config_class.from_dict(UpperCamelCase ) lowerCamelCase_ = use_timm_backbone lowerCamelCase_ = backbone_config lowerCamelCase_ = num_channels lowerCamelCase_ = num_queries lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = d_model lowerCamelCase_ = encoder_ffn_dim lowerCamelCase_ = encoder_layers lowerCamelCase_ = encoder_attention_heads lowerCamelCase_ = decoder_ffn_dim lowerCamelCase_ = decoder_layers lowerCamelCase_ = decoder_attention_heads lowerCamelCase_ = dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = activation_dropout lowerCamelCase_ = activation_function lowerCamelCase_ = init_std lowerCamelCase_ = init_xavier_std lowerCamelCase_ = encoder_layerdrop lowerCamelCase_ = auxiliary_loss lowerCamelCase_ = position_embedding_type lowerCamelCase_ = backbone lowerCamelCase_ = use_pretrained_backbone lowerCamelCase_ = dilation # deformable attributes lowerCamelCase_ = num_feature_levels lowerCamelCase_ = encoder_n_points lowerCamelCase_ = decoder_n_points lowerCamelCase_ = two_stage lowerCamelCase_ = two_stage_num_proposals lowerCamelCase_ = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." 
) # Hungarian matcher lowerCamelCase_ = class_cost lowerCamelCase_ = bbox_cost lowerCamelCase_ = giou_cost # Loss coefficients lowerCamelCase_ = mask_loss_coefficient lowerCamelCase_ = dice_loss_coefficient lowerCamelCase_ = bbox_loss_coefficient lowerCamelCase_ = giou_loss_coefficient lowerCamelCase_ = eos_coefficient lowerCamelCase_ = focal_alpha lowerCamelCase_ = disable_custom_kernels super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def snake_case ( self ): """simple docstring""" return self.encoder_attention_heads @property def snake_case ( self ): """simple docstring""" return self.d_model def snake_case ( self ): """simple docstring""" lowerCamelCase_ = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowerCamelCase_ = self.backbone_config.to_dict() lowerCamelCase_ = self.__class__.model_type return output
675
1
'''simple docstring''' import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging a_ : List[str] = logging.get_logger(__name__) a_ : Tuple = R""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. """ class snake_case ( lowercase ): """simple docstring""" @add_start_docstrings(UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = max_length lowerCamelCase_ = max_position_embeddings @add_start_docstrings(UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = input_ids.shape[-1] lowerCamelCase_ = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " f'''maximum length ({self.max_position_embeddings}). 
Depending on the model, you may observe ''' "exceptions, performance degradation, or nothing at all." ) return is_done class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' "with `max_length = start_length + max_new_tokens` instead." , UpperCamelCase , ) lowerCamelCase_ = start_length lowerCamelCase_ = max_new_tokens lowerCamelCase_ = start_length + max_new_tokens @add_start_docstrings(UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" return input_ids.shape[-1] >= self.max_length class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = max_time lowerCamelCase_ = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" return time.time() - self.initial_timestamp > self.max_time class snake_case ( lowercase ): """simple docstring""" @add_start_docstrings(UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" return any(criteria(UpperCamelCase , UpperCamelCase ) for criteria in self ) @property def snake_case ( self ): """simple docstring""" for stopping_criterium in self: if isinstance(UpperCamelCase , UpperCamelCase ): return stopping_criterium.max_length elif isinstance(UpperCamelCase , UpperCamelCase ): return stopping_criterium.max_length return None def __snake_case ( UpperCAmelCase_ : StoppingCriteriaList , UpperCAmelCase_ : int ): lowerCamelCase_ = stopping_criteria.max_length lowerCamelCase_ = deepcopy(UpperCAmelCase_ ) if stopping_max_length is not None and 
stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UpperCAmelCase_ ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=UpperCAmelCase_ ) ) return new_stopping_criteria
675
'''simple docstring''' import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class snake_case ( pl.LightningModule ): """simple docstring""" def __init__( self , UpperCamelCase ): """simple docstring""" super().__init__() lowerCamelCase_ = model lowerCamelCase_ = 2 lowerCamelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels ) def snake_case ( self ): """simple docstring""" pass def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str ): # load longformer model from model identifier lowerCamelCase_ = LongformerModel.from_pretrained(UpperCAmelCase_ ) lowerCamelCase_ = LightningModel(UpperCAmelCase_ ) lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model lowerCamelCase_ = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase_ ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(UpperCAmelCase_ ) print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' ) if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--longformer_model""", default=None, type=str, required=True, help="""model identifier of longformer. 
Should be either `longformer-base-4096` or `longformer-large-4096`.""", ) parser.add_argument( """--longformer_question_answering_ckpt_path""", default=None, type=str, required=True, help="""Path the official PyTorch Lightning Checkpoint.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a_ : Tuple = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
675
1
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]="pt" ): lowerCamelCase_ = {"add_prefix_space": True} if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and not line.startswith(" " ) else {} lowerCamelCase_ = padding_side return tokenizer( [line] , max_length=UpperCAmelCase_ , padding="max_length" if pad_to_max_length else None , truncation=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=None , ): lowerCamelCase_ = input_ids.ne(UpperCAmelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase="train" , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="" , ): """simple docstring""" super().__init__() lowerCamelCase_ = Path(UpperCamelCase ).joinpath(type_path + ".source" ) lowerCamelCase_ = Path(UpperCamelCase ).joinpath(type_path + ".target" ) lowerCamelCase_ = self.get_char_lens(self.src_file ) lowerCamelCase_ = max_source_length lowerCamelCase_ = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' 
lowerCamelCase_ = tokenizer lowerCamelCase_ = prefix if n_obs is not None: lowerCamelCase_ = self.src_lens[:n_obs] lowerCamelCase_ = src_lang lowerCamelCase_ = tgt_lang def __len__( self ): """simple docstring""" return len(self.src_lens ) def __getitem__( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = index + 1 # linecache starts at 1 lowerCamelCase_ = self.prefix + linecache.getline(str(self.src_file ) , UpperCamelCase ).rstrip("\n" ) lowerCamelCase_ = linecache.getline(str(self.tgt_file ) , UpperCamelCase ).rstrip("\n" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right lowerCamelCase_ = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCamelCase ) else self.tokenizer ) lowerCamelCase_ = self.tokenizer.generator if isinstance(self.tokenizer , UpperCamelCase ) else self.tokenizer lowerCamelCase_ = encode_line(UpperCamelCase , UpperCamelCase , self.max_source_length , "right" ) lowerCamelCase_ = encode_line(UpperCamelCase , UpperCamelCase , self.max_target_length , "right" ) lowerCamelCase_ = source_inputs["input_ids"].squeeze() lowerCamelCase_ = target_inputs["input_ids"].squeeze() lowerCamelCase_ = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def snake_case ( UpperCamelCase ): """simple docstring""" return [len(UpperCamelCase ) for x in Path(UpperCamelCase ).open().readlines()] def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = torch.stack([x["input_ids"] for x in batch] ) lowerCamelCase_ = torch.stack([x["attention_mask"] for x in batch] ) lowerCamelCase_ = torch.stack([x["decoder_input_ids"] for x in batch] ) 
lowerCamelCase_ = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , UpperCamelCase ) else self.tokenizer.pad_token_id ) lowerCamelCase_ = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , UpperCamelCase ) else self.tokenizer.pad_token_id ) lowerCamelCase_ = trim_batch(UpperCamelCase , UpperCamelCase ) lowerCamelCase_ ,lowerCamelCase_ = trim_batch(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase ) lowerCamelCase_ = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch a_ : Any = getLogger(__name__) def __snake_case ( UpperCAmelCase_ : List[List] ): return list(itertools.chain.from_iterable(UpperCAmelCase_ ) ) def __snake_case ( UpperCAmelCase_ : str ): lowerCamelCase_ = get_git_info() save_json(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , "git_log.json" ) ) def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=4 , **UpperCAmelCase_ : Union[str, Any] ): with open(UpperCAmelCase_ , "w" ) as f: json.dump(UpperCAmelCase_ , UpperCAmelCase_ , indent=UpperCAmelCase_ , **UpperCAmelCase_ ) def __snake_case ( UpperCAmelCase_ : Optional[int] ): with open(UpperCAmelCase_ ) as f: return json.load(UpperCAmelCase_ ) def __snake_case ( ): lowerCamelCase_ = git.Repo(search_parent_directories=UpperCAmelCase_ ) lowerCamelCase_ = { "repo_id": str(UpperCAmelCase_ ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def __snake_case ( UpperCAmelCase_ : Callable , UpperCAmelCase_ : Iterable ): return list(map(UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ): with open(UpperCAmelCase_ , "wb" ) as f: return pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( UpperCAmelCase_ : Union[str, Any] ): def remove_articles(UpperCAmelCase_ : Tuple ): return 
re.sub(r"\b(a|an|the)\b" , " " , UpperCAmelCase_ ) def white_space_fix(UpperCAmelCase_ : str ): return " ".join(text.split() ) def remove_punc(UpperCAmelCase_ : Optional[Any] ): lowerCamelCase_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCAmelCase_ : List[str] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase_ ) ) ) ) def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ): lowerCamelCase_ = normalize_answer(UpperCAmelCase_ ).split() lowerCamelCase_ = normalize_answer(UpperCAmelCase_ ).split() lowerCamelCase_ = Counter(UpperCAmelCase_ ) & Counter(UpperCAmelCase_ ) lowerCamelCase_ = sum(common.values() ) if num_same == 0: return 0 lowerCamelCase_ = 1.0 * num_same / len(UpperCAmelCase_ ) lowerCamelCase_ = 1.0 * num_same / len(UpperCAmelCase_ ) lowerCamelCase_ = (2 * precision * recall) / (precision + recall) return fa def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] ): return normalize_answer(UpperCAmelCase_ ) == normalize_answer(UpperCAmelCase_ ) def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ): assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) lowerCamelCase_ = 0 for hypo, pred in zip(UpperCAmelCase_ , UpperCAmelCase_ ): em += exact_match_score(UpperCAmelCase_ , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: em /= len(UpperCAmelCase_ ) return {"em": em} def __snake_case ( UpperCAmelCase_ : Tuple ): return model_prefix.startswith("rag" ) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ): lowerCamelCase_ = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead lowerCamelCase_ = "dropout_rate" for p in extra_params: if getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): if not hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) and not hasattr(UpperCAmelCase_ , equivalent_param[p] ): logger.info("config 
doesn't have a `{}` attribute".format(UpperCAmelCase_ ) ) delattr(UpperCAmelCase_ , UpperCAmelCase_ ) continue lowerCamelCase_ = p if hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) else equivalent_param[p] setattr(UpperCAmelCase_ , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) delattr(UpperCAmelCase_ , UpperCAmelCase_ ) return hparams, config
675
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a_ : Optional[Any] = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : str = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=0 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope 
lowerCamelCase_ = projection_dim def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) lowerCamelCase_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFDPRContextEncoder(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.pooler_output.shape 
, (self.batch_size, self.projection_dim or self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFDPRQuestionEncoder(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFDPRReader(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"input_ids": input_ids} return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) _lowerCamelCase = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {} _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ 
= TFDPRModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRReader.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" ) lowerCamelCase_ = tf.constant( [[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase_ = model(UpperCamelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. 
lowerCamelCase_ = tf.constant( [ [ 0.03_236_253, 0.12_753_335, 0.16_818_509, 0.00_279_786, 0.3_896_933, 0.24_264_945, 0.2_178_971, -0.02_335_227, -0.08_481_959, -0.14_324_117, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
675
'''simple docstring'''
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): identifiers in this file look machine-renamed (every assignment
# target is `lowerCamelCase_`, every argument `UpperCamelCase`), while many
# reads still reference the pre-mangling names (`tokenizer`, `vocab_keys`, ...).
# As written the code would raise NameError at runtime — the comments below
# describe the evident intent; confirm against the upstream test file.

# Path to a small SentencePiece fixture model (with byte fallback) used by the tests.
a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")


@require_sentencepiece
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """Tokenizer test suite for GPT-SW3 (Swedish GPT), driven by the common
    TokenizerTesterMixin plus GPT-SW3-specific checks below."""

    # Mixin configuration: tokenizer class under test and feature flags.
    _lowerCamelCase = GPTSwaTokenizer
    _lowerCamelCase = False
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """Create a fixture tokenizer from the SentencePiece model and save it
        to the mixin's temporary directory for the common tests."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self , UpperCamelCase ):
        """Return an (input_text, expected_output_text) pair for round-trip tests."""
        lowerCamelCase_ = "This is a test"
        lowerCamelCase_ = "This is a test"
        return input_text, output_text

    def snake_case ( self ):
        """Check token <-> id conversion for the "<s>" token (expected id 1)."""
        lowerCamelCase_ = "<s>"
        lowerCamelCase_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self ):
        """Spot-check the first, second and last vocab entries and the vocab size (2000)."""
        lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(UpperCamelCase ) , 2000 )

    def snake_case ( self ):
        """The fixture model has a vocabulary of exactly 2000 pieces."""
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )

    def snake_case ( self ):
        """Check tokenization, ids, and id->token round trip, including the
        byte-fallback pieces (<0x39>, <0xC3>, <0xA9>) for characters outside
        the SentencePiece vocabulary."""
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )
        lowerCamelCase_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [465, 287, 265, 631, 842] )
        lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
        # fmt: on
        lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
        self.assertListEqual(
            UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def snake_case ( self ):
        """encode_fast must agree with tokenize + convert_tokens_to_ids, and
        decode_fast must reproduce the original text."""
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )
        lowerCamelCase_ = ["This is a test", "I was born in 92000, and this is falsé."]
        lowerCamelCase_ = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ):
            self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(UpperCamelCase , UpperCamelCase ):
            self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase )

    @slow
    def snake_case ( self ):
        """Integration test against the published AI-Sweden/gpt-sw3-126m
        checkpoint: the mixed Swedish/English/code sequences below must encode
        exactly to the pinned ids/masks."""
        lowerCamelCase_ = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCamelCase , )
675
1
'''simple docstring'''


def gray_code(bit_count: int) -> list:
    """Return the ``bit_count``-bit Gray code sequence as a list of integers.

    BUGFIX: the original file defined two top-level functions under the same
    mangled name (``__snake_case``), so the second definition shadowed the
    first, and this function's call to ``gray_code_sequence_string`` raised
    ``NameError`` (that name was never defined). The obfuscation also
    collapsed every assignment target onto one throwaway variable, so the
    string-to-int conversion results were discarded. Both defects are fixed
    here; the helper keeps the name the call site already referenced.

    >>> gray_code(2)
    [0, 1, 3, 2]

    :param bit_count: number of bits per code word; must be non-negative.
    :raises ValueError: if ``bit_count`` is negative.
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # Build the binary-string sequence, then convert each word to an int
    # in place (base-2 parse).
    sequence = gray_code_sequence_string(bit_count)
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the ``bit_count``-bit Gray code sequence as binary strings.

    Uses the classic reflect-and-prefix construction: take the (n-1)-bit
    sequence, prefix it with "0", then append the same sequence traversed in
    reverse with a "1" prefix. Adjacent words differ in exactly one bit.

    >>> gray_code_sequence_string(2)
    ['00', '01', '11', '10']
    """
    # Base cases: n = 0 or n = 1.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # 1 << n is equivalent to 2**n, the sequence length
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []

    # First half: "0" + each word of the smaller sequence, in order.
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # Second half: "1" + each word, traversed from the end (the "reflection").
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
675
'''simple docstring'''
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


# NOTE(review): identifiers here look machine-renamed (all assignment targets
# are `lowerCamelCase_`, all parameters `UpperCamelCase`) while reads still use
# the pre-mangling names (`kwargs`, `text`, `encodings`, ...). As written this
# would raise NameError; comments describe the evident intent (an OwlViT
# processor wrapping an image processor and a CLIP tokenizer).
class snake_case ( lowercase ):
    """Processor that bundles an OwlViT image processor and a CLIP tokenizer
    into a single callable producing model-ready text/image encodings."""

    # ProcessorMixin attribute wiring: component names and their classes.
    _lowerCamelCase = ["image_processor", "tokenizer"]
    _lowerCamelCase = "OwlViTImageProcessor"
    _lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ):
        """Build the processor from an image processor and a tokenizer.

        Accepts the deprecated `feature_extractor` kwarg as a fallback for the
        image processor, emitting a deprecation warning. Raises ValueError if
        either component is missing.
        """
        lowerCamelCase_ = None
        if "feature_extractor" in kwargs:
            # Back-compat shim: honor the old kwarg but warn about removal in v5.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , UpperCamelCase , )
            lowerCamelCase_ = kwargs.pop("feature_extractor" )
        lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(UpperCamelCase , UpperCamelCase )

    def __call__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="max_length" , UpperCamelCase="np" , **UpperCamelCase ):
        """Encode text queries and/or images (and/or query images) into a
        BatchEncoding in the requested tensor framework ("np", "jax", "pt",
        "tf"). At least one of text / query_images / images is required.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            # A single string or a flat list of strings: one tokenizer call.
            if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )):
                lowerCamelCase_ = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )]
            # A nested list of strings: pad every sample to the same number of
            # queries, tokenize each sample separately.
            elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ):
                lowerCamelCase_ = []
                # Maximum number of queries across batch
                lowerCamelCase_ = max([len(UpperCamelCase ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(UpperCamelCase ) != max_num_queries:
                        lowerCamelCase_ = t + [" "] * (max_num_queries - len(UpperCamelCase ))
                    lowerCamelCase_ = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
                    encodings.append(UpperCamelCase )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            # Concatenate the per-sample encodings along the batch axis in the
            # requested framework; frameworks are imported lazily.
            if return_tensors == "np":
                lowerCamelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowerCamelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowerCamelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                lowerCamelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowerCamelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = input_ids
            lowerCamelCase_ = attention_mask
        if query_images is not None:
            # Query images get their own BatchEncoding under `query_pixel_values`.
            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = self.image_processor(
                UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values
            lowerCamelCase_ = query_pixel_values
        if images is not None:
            lowerCamelCase_ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
        # Merge image pixel values into the text/query encoding when both are
        # present; otherwise return whichever encoding was produced.
        if text is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )

    @property
    def snake_case ( self ):
        """Deprecated alias for `image_processor_class` (warns, removed in v5)."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase , )
        return self.image_processor_class

    @property
    def snake_case ( self ):
        """Deprecated alias for `image_processor` (warns, removed in v5)."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , )
        return self.image_processor
675
1
'''simple docstring'''
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


# NOTE(review): assignment targets were machine-renamed to `lowerCamelCase_`
# while later reads use the original names (`dataset`, `audio`, `audio_classifier`);
# comments describe the evident intent.
@is_pipeline_test
@require_torch
class snake_case ( unittest.TestCase ):
    """Tests for the zero-shot audio classification pipeline (CLAP models),
    using the last sample of the ashraq/esc50 train split (a dog sound)."""

    @require_torch
    def snake_case ( self ):
        """Smoke test with a tiny random checkpoint: scores are near-uniform
        (~0.5 each), so this only pins pipeline plumbing, not model quality."""
        lowerCamelCase_ = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        lowerCamelCase_ = load_dataset("ashraq/esc50" )
        lowerCamelCase_ = dataset["train"]["audio"][-1]["array"]
        lowerCamelCase_ = audio_classifier(UpperCamelCase , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(UpperCamelCase ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )

    @unittest.skip("No models are available in TF" )
    def snake_case ( self ):
        """Placeholder for the TF small-model test (no TF checkpoints exist)."""
        pass

    @slow
    @require_torch
    def snake_case ( self ):
        """Integration test with the real laion/clap-htsat-unfused checkpoint:
        single input, list input, and batched (batch_size=5) input must all
        classify the dog audio as "Sound of a dog" with ~0.999 confidence."""
        lowerCamelCase_ = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        lowerCamelCase_ = load_dataset("ashraq/esc50" )
        lowerCamelCase_ = dataset["train"]["audio"][-1]["array"]
        lowerCamelCase_ = audio_classifier(UpperCamelCase , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(UpperCamelCase ) , [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ] , )
        # Same audio repeated 5 times: expect 5 identical result lists.
        lowerCamelCase_ = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(UpperCamelCase ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ] * 5 , )
        # Batched execution must give the same results as sequential.
        lowerCamelCase_ = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(UpperCamelCase ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ] * 5 , )

    @unittest.skip("No models are available in TF" )
    def snake_case ( self ):
        """Placeholder for the TF large-model test (no TF checkpoints exist)."""
        pass
675
'''simple docstring'''
import os
import sys
import unittest


# NOTE(review): assignment targets were machine-renamed (`a_`, `lowerCamelCase_`)
# while later reads use the original names (`git_repo_path`, `objects`,
# `dummy_files`); comments describe the evident intent.

# Repo root (three levels up from this file); make `utils/` importable.
a_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Tuple = os.path.join(git_repo_path, """src""", """transformers""")

# Templates the dummy-file generator is expected to produce for a constant,
# a class, and a function, respectively.
# NOTE(review): the whitespace-mangling collapsed the original multi-line
# template bodies onto single lines; verify against upstream check_dummies.
a_ : List[Any] = """ {0} = None """
a_ : Optional[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """
a_ : str = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """


class snake_case ( unittest.TestCase ):
    """Unit tests for the `check_dummies` utility, which generates the
    backend-specific dummy objects under transformers.utils."""

    def snake_case ( self ):
        """find_backend must return None for ordinary lines and the (possibly
        compound, `_and_`-joined) backend name for `if not is_*_available()` guards."""
        lowerCamelCase_ = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
        self.assertIsNone(UpperCamelCase )
        lowerCamelCase_ = find_backend(" if not is_tokenizers_available():" )
        self.assertEqual(UpperCamelCase , "tokenizers" )
        lowerCamelCase_ = find_backend(" if not is_tensorflow_text_available():" )
        self.assertEqual(UpperCamelCase , "tensorflow_text" )
        lowerCamelCase_ = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
        self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers" )
        lowerCamelCase_ = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
        self.assertEqual(UpperCamelCase , "sentencepiece_and_tensorflow_text" )
        lowerCamelCase_ = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
        self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers_and_vision" )

    def snake_case ( self ):
        """read_init must map backends to object names; spot-check a few
        well-known backends and objects rather than the full listing."""
        lowerCamelCase_ = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , UpperCamelCase )
        self.assertIn("tensorflow_text" , UpperCamelCase )
        self.assertIn("sentencepiece_and_tokenizers" , UpperCamelCase )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertModel" , objects["tf"] )
        self.assertIn("FlaxBertModel" , objects["flax"] )
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
        self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )

    def snake_case ( self ):
        """create_dummy_object must render the correct stub for a constant,
        a function, and a class gated on the 'torch' backend."""
        lowerCamelCase_ = create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(UpperCamelCase , "\nCONSTANT = None\n" )
        lowerCamelCase_ = create_dummy_object("function" , "'torch'" )
        self.assertEqual(
            UpperCamelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
        lowerCamelCase_ = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
        lowerCamelCase_ = create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """create_dummy_files must emit a complete autogenerated dummy module
        (header + imports + one stub per object) for the given backend map."""
        lowerCamelCase_ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
        lowerCamelCase_ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , UpperCamelCase )
675
1
'''simple docstring''' import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=30 , UpperCamelCase=2 , UpperCamelCase=3 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=None , UpperCamelCase=2 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = patch_size lowerCamelCase_ = num_channels lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = 
type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = scope lowerCamelCase_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase_ = (image_size // patch_size) ** 2 lowerCamelCase_ = num_patches + 2 def snake_case ( self ): """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def snake_case ( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = DeiTModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = DeiTForMaskedImageModeling(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase_ = 1 
lowerCamelCase_ = DeiTForMaskedImageModeling(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.type_sequence_label_size lowerCamelCase_ = DeiTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase_ = 1 lowerCamelCase_ = DeiTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _lowerCamelCase = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = 
DeiTModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ): """simple docstring""" lowerCamelCase_ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , 
return_labels=UpperCamelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def snake_case ( self ): """simple docstring""" if not self.model_tester.is_training: return lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(UpperCamelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowerCamelCase_ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.train() lowerCamelCase_ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) lowerCamelCase_ = model(**UpperCamelCase ).loss loss.backward() def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCamelCase_ = False lowerCamelCase_ = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowerCamelCase_ = model_class(UpperCamelCase ) model.gradient_checkpointing_enable() model.to(UpperCamelCase ) model.train() lowerCamelCase_ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) lowerCamelCase_ = model(**UpperCamelCase ).loss loss.backward() def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": 
torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(UpperCamelCase ), *get_values(UpperCamelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ): lowerCamelCase_ = problem_type["title"] lowerCamelCase_ = problem_type["num_labels"] lowerCamelCase_ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.train() lowerCamelCase_ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if problem_type["num_labels"] > 1: lowerCamelCase_ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) lowerCamelCase_ = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=UpperCamelCase ) as warning_list: lowerCamelCase_ = model(**UpperCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def snake_case ( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = DeiTModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def __snake_case ( ): lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case ( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( UpperCamelCase ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): lowerCamelCase_ = model(**UpperCamelCase ) # verify the logits lowerCamelCase_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowerCamelCase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def snake_case ( self ): """simple docstring""" lowerCamelCase_ = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) 
lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="pt" ) lowerCamelCase_ = inputs.pixel_values.to(UpperCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowerCamelCase_ = model(UpperCamelCase )
675
'''Dummy-object module: placeholder raised in place of ONNX-backed classes.'''
from ..utils import DummyObject, requires_backends


# NOTE(review): `metaclass=lowercase` references a name not defined in this
# chunk — in the original dummy-object modules this is `DummyObject` (imported
# above); looks like name obfuscation. Confirm against the original file.
class snake_case ( metaclass=lowercase ):
    """Placeholder standing in for an ONNX-backed class when `onnx` is absent.

    Every entry point delegates to ``requires_backends(..., ["onnx"])``, which
    — presumably (it is defined in ``..utils``, outside this view) — raises an
    informative error asking the user to install the missing backend.
    """

    # Backends that must be installed before the real class can be used.
    _lowerCamelCase = ["onnx"]

    def __init__( self , *UpperCamelCase , **UpperCamelCase ):
        """Reject instantiation unless the `onnx` backend is available."""
        requires_backends(self , ["onnx"] )

    @classmethod
    def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ):
        """Class-level entry point; fails fast when `onnx` is missing."""
        requires_backends(cls , ["onnx"] )

    # NOTE(review): this second classmethod rebinds the first (both are named
    # `snake_case`) — in the original module these were two distinct factory
    # methods (e.g. `from_config` / `from_pretrained`); the obfuscation
    # collapsed the names. Only the last definition survives at runtime.
    @classmethod
    def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ):
        """Second class-level entry point; same backend check."""
        requires_backends(cls , ["onnx"] )
675
1
'''Conversion script: port original ConditionalDETR checkpoints (torch hub,
DeppMeng/ConditionalDETR) into the HuggingFace Transformers format, verify the
outputs match, and save/push the converted model.'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
a_ : int = logging.get_logger(__name__)

# NOTE(review): the assignment targets throughout this module (`a_`,
# `lowerCamelCase_`) and the function names (`__snake_case`) appear
# machine-obfuscated: later code refers to `logger`, `rename_keys`, `val`,
# `in_proj_weight`, `parser`, `args`, and to functions `rename_key`,
# `rename_backbone_keys`, `read_in_q_k_v`, `prepare_img`,
# `convert_conditional_detr_checkpoint` — none of which are bound under those
# names here. As written, the module raises NameError at import time. Restore
# the original, consistent names before running; comments below describe the
# intended behavior.

# here we list all keys to be renamed (original name on the left, our name on the right)
a_ : int = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
        )
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
        )
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("""input_proj.weight""", """input_projection.weight"""),
        ("""input_proj.bias""", """input_projection.bias"""),
        ("""query_embed.weight""", """query_position_embeddings.weight"""),
        ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
        ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
        ("""class_embed.weight""", """class_labels_classifier.weight"""),
        ("""class_embed.bias""", """class_labels_classifier.bias"""),
        ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
        ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
        ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
        ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
        ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
        ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
        ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
        ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
        ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
        ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
        ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
        ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
        ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
        ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
        ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
        ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
    ]
)


# Pop `state_dict[old]` and re-insert it under the new name (in-place rename).
# NOTE(review): intended signature is `rename_key(state_dict, old, new)`;
# `val` on the second line is unbound as written (obfuscated target).
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
    lowerCamelCase_ = state_dict.pop(UpperCAmelCase_ )
    lowerCamelCase_ = val


# Build a new OrderedDict in which every backbone key is moved from the
# original "backbone.0.body" namespace to "backbone.conv_encoder.model";
# all other keys are copied through unchanged.
# NOTE(review): intended name is `rename_backbone_keys(state_dict)`; the
# returned `new_state_dict` is unbound as written (obfuscated target).
def __snake_case ( UpperCAmelCase_ : Tuple ):
    lowerCamelCase_ = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            lowerCamelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            lowerCamelCase_ = value
        else:
            lowerCamelCase_ = value
    return new_state_dict


# Split each encoder layer's fused in_proj (PyTorch MultiHeadAttention stores
# q/k/v as one 768x256 matrix + bias) into separate q/k/v projections.
# NOTE(review): intended name is `read_in_q_k_v(state_dict, is_panoptic=False)`;
# the destination keys of the q/k/v slices were erased by the obfuscation —
# recover them from the original conversion script before running.
def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict=False ):
    lowerCamelCase_ = ""
    if is_panoptic:
        # panoptic checkpoints nest the transformer under a "conditional_detr." prefix
        lowerCamelCase_ = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        lowerCamelCase_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        lowerCamelCase_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase_ = in_proj_weight[:256, :]
        lowerCamelCase_ = in_proj_bias[:256]
        lowerCamelCase_ = in_proj_weight[256:512, :]
        lowerCamelCase_ = in_proj_bias[256:512]
        lowerCamelCase_ = in_proj_weight[-256:, :]
        lowerCamelCase_ = in_proj_bias[-256:]


# Download the standard COCO test image (two cats on a couch) used to verify
# the conversion. NOTE(review): intended name is `prepare_img()`; the URL /
# stream=True arguments are unbound as written (obfuscated targets).
def __snake_case ( ):
    lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowerCamelCase_ = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
    return im


# Main entry point: build a ConditionalDetr config for `model_name`, load the
# original checkpoint from torch hub, remap its state dict into the HF layout,
# check the converted model reproduces the original outputs, then push + save.
# NOTE(review): intended name is
# `convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path)`.
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] ):
    lowerCamelCase_ = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        lowerCamelCase_ = "resnet101"
    if "dc5" in model_name:
        lowerCamelCase_ = True
    lowerCamelCase_ = "panoptic" in model_name
    if is_panoptic:
        # panoptic head predicts 250 classes; no id2label mapping is loaded
        lowerCamelCase_ = 250
    else:
        # detection head: 91 COCO classes, labels fetched from the hub dataset
        lowerCamelCase_ = 91
        lowerCamelCase_ = "huggingface/label-files"
        lowerCamelCase_ = "coco-detection-id2label.json"
        lowerCamelCase_ = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="dataset" ) , "r" ) )
        # JSON keys are strings; config expects int class ids
        lowerCamelCase_ = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
        lowerCamelCase_ = idalabel
        lowerCamelCase_ = {v: k for k, v in idalabel.items()}
    # load image processor
    lowerCamelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
    lowerCamelCase_ = ConditionalDetrImageProcessor(format=UpperCAmelCase_ )
    # prepare image
    lowerCamelCase_ = prepare_img()
    lowerCamelCase_ = image_processor(images=UpperCAmelCase_ , return_tensors="pt" )
    lowerCamelCase_ = encoding["pixel_values"]

    logger.info(F'''Converting model {model_name}...''' )

    # load original model from torch hub
    lowerCamelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , UpperCAmelCase_ , pretrained=UpperCAmelCase_ ).eval()
    lowerCamelCase_ = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            lowerCamelCase_ = "conditional_detr." + src
        rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    lowerCamelCase_ = rename_backbone_keys(UpperCAmelCase_ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(UpperCAmelCase_ , is_panoptic=UpperCAmelCase_ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    lowerCamelCase_ = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                # base-model weight inside the panoptic checkpoint
                lowerCamelCase_ = state_dict.pop(UpperCAmelCase_ )
                lowerCamelCase_ = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                # head weights keep their names under the "conditional_detr." namespace
                lowerCamelCase_ = state_dict.pop(UpperCAmelCase_ )
                lowerCamelCase_ = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                # segmentation-specific modules stay where they are
                continue
            else:
                lowerCamelCase_ = state_dict.pop(UpperCAmelCase_ )
                lowerCamelCase_ = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                lowerCamelCase_ = state_dict.pop(UpperCAmelCase_ )
                lowerCamelCase_ = val
    # finally, create HuggingFace model and load state dict
    lowerCamelCase_ = ConditionalDetrForSegmentation(UpperCAmelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCAmelCase_ )
    model.load_state_dict(UpperCAmelCase_ )
    model.eval()
    # NOTE(review): pushing to the hub *before* the verification asserts below
    # means a mismatching conversion still gets uploaded — consider reordering.
    model.push_to_hub(repo_id=UpperCAmelCase_ , organization="DepuMeng" , commit_message="Add model" )
    # verify our conversion
    lowerCamelCase_ = conditional_detr(UpperCAmelCase_ )
    lowerCamelCase_ = model(UpperCAmelCase_ )
    assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )

    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
    model.save_pretrained(UpperCAmelCase_ )
    image_processor.save_pretrained(UpperCAmelCase_ )


if __name__ == "__main__":
    a_ : Any = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""conditional_detr_resnet50""",
        type=str,
        help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    a_ : List[str] = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
675
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=1000 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope 
lowerCamelCase_ = range_bbox def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ = bbox[i, j, 3] lowerCamelCase_ = bbox[i, j, 1] lowerCamelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ = bbox[i, j, 2] lowerCamelCase_ = bbox[i, j, 0] lowerCamelCase_ = t lowerCamelCase_ = tf.convert_to_tensor(UpperCamelCase ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForSequenceClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase 
, token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForQuestionAnswering(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = 10 def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) 
def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFLayoutLMModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def snake_case ( self ): """simple docstring""" pass def __snake_case ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCamelCase_ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCamelCase_ = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) lowerCamelCase_ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the sequence output on [0, :3, :3] lowerCamelCase_ = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCamelCase_ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase , atol=1e-3 ) ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized sequence classification head lowerCamelCase_ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCamelCase_ = outputs.loss lowerCamelCase_ = (2,) self.assertEqual(loss.shape , UpperCamelCase ) # test the shape 
of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = (2, 2) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , UpperCamelCase ) self.assertEqual(outputs.end_logits.shape , UpperCamelCase )
675
1
"""K-Means clustering on the TensorFlow 1.x graph/session API.

NOTE(review): identifiers in this file appear machine-mangled — every local is
assigned to the same name ``lowerCamelCase_`` while later statements read
descriptive names (``graph``, ``sess``, ``centroids``, ``dim`` ...) that are
never bound, and the function's two parameters share one name.  The code cannot
run as written; the comments describe the evident intent.  Restore real names
against the original source before executing.
"""
from random import shuffle
import tensorflow as tf
from numpy import array


def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] ):
    # NOTE(review): duplicate parameter names (SyntaxError as-is); the intended
    # signature is presumably (vectors, noofclusters) — confirm upstream.
    # Coerce the cluster count to int and require fewer clusters than vectors.
    lowerCamelCase_ = int(UpperCAmelCase_ )
    assert noofclusters < len(UpperCAmelCase_ )
    # Find out the dimensionality
    lowerCamelCase_ = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    lowerCamelCase_ = list(range(len(UpperCAmelCase_ ) ) )
    shuffle(UpperCAmelCase_ )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    lowerCamelCase_ = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        lowerCamelCase_ = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        lowerCamelCase_ = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(UpperCAmelCase_ )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        lowerCamelCase_ = tf.placeholder("float64" , [dim] )
        lowerCamelCase_ = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(UpperCAmelCase_ , UpperCAmelCase_ ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        lowerCamelCase_ = [tf.Variable(0 ) for i in range(len(UpperCAmelCase_ ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        lowerCamelCase_ = tf.placeholder("int32" )
        lowerCamelCase_ = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(UpperCAmelCase_ , UpperCAmelCase_ ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        lowerCamelCase_ = tf.placeholder("float" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        lowerCamelCase_ = tf.reduce_mean(UpperCAmelCase_ , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        lowerCamelCase_ = tf.placeholder("float" , [dim] )
        lowerCamelCase_ = tf.placeholder("float" , [dim] )
        # NOTE(review): tf.sub was renamed tf.subtract in TF 1.0 — this code
        # targets a pre-1.0 TensorFlow.
        lowerCamelCase_ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(UpperCAmelCase_ , UpperCAmelCase_ ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        lowerCamelCase_ = tf.placeholder("float" , [noofclusters] )
        lowerCamelCase_ = tf.argmin(UpperCAmelCase_ , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        lowerCamelCase_ = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(UpperCAmelCase_ )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        lowerCamelCase_ = 100
        for _ in range(UpperCAmelCase_ ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(UpperCAmelCase_ ) ):
                lowerCamelCase_ = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                # NOTE(review): the feed dict below repeats the key ``va`` —
                # the second entry overwrites the first, so only one
                # placeholder is actually fed; the original presumably used
                # two distinct placeholders (v1/v2).  Confirm upstream.
                lowerCamelCase_ = [
                    sess.run(UpperCAmelCase_ , feed_dict={va: vect, va: sess.run(UpperCAmelCase_ )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                lowerCamelCase_ = sess.run(
                    UpperCAmelCase_ , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(UpperCAmelCase_ ):
                # Collect all the vectors assigned to this cluster
                lowerCamelCase_ = [
                    vectors[i]
                    for i in range(len(UpperCAmelCase_ ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                lowerCamelCase_ = sess.run(
                    UpperCAmelCase_ , feed_dict={mean_input: array(UpperCAmelCase_ )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        lowerCamelCase_ = sess.run(UpperCAmelCase_ )
        lowerCamelCase_ = sess.run(UpperCAmelCase_ )
        return centroids, assignments
675
'''simple docstring''' import argparse from collections import defaultdict import yaml a_ : int = """docs/source/en/_toctree.yml""" def __snake_case ( UpperCAmelCase_ : Optional[int] ): lowerCamelCase_ = defaultdict(UpperCAmelCase_ ) lowerCamelCase_ = [] lowerCamelCase_ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"local": doc["local"], "title": doc["title"]} ) else: new_doc_list.append(UpperCAmelCase_ ) lowerCamelCase_ = new_doc_list lowerCamelCase_ = [key for key, value in counts.items() if value > 1] lowerCamelCase_ = [] for duplicate_key in duplicates: lowerCamelCase_ = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} ) if len(UpperCAmelCase_ ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] ) lowerCamelCase_ = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : s["title"].lower() ) # "overview" gets special treatment and is always first if len(UpperCAmelCase_ ) > 1: raise ValueError("{doc_list} has two 'overview' docs which is not allowed." 
) overview_doc.extend(UpperCAmelCase_ ) # Sort return overview_doc def __snake_case ( UpperCAmelCase_ : List[str]=False ): with open(UpperCAmelCase_ , encoding="utf-8" ) as f: lowerCamelCase_ = yaml.safe_load(f.read() ) # Get to the API doc lowerCamelCase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCamelCase_ = content[api_idx]["sections"] # Then to the model doc lowerCamelCase_ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 lowerCamelCase_ = api_doc[scheduler_idx]["sections"] lowerCamelCase_ = clean_doc_toc(UpperCAmelCase_ ) lowerCamelCase_ = False if new_scheduler_doc != scheduler_doc: lowerCamelCase_ = True if overwrite: lowerCamelCase_ = new_scheduler_doc if diff: if overwrite: lowerCamelCase_ = api_doc with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." 
) def __snake_case ( UpperCAmelCase_ : List[Any]=False ): with open(UpperCAmelCase_ , encoding="utf-8" ) as f: lowerCamelCase_ = yaml.safe_load(f.read() ) # Get to the API doc lowerCamelCase_ = 0 while content[api_idx]["title"] != "API": api_idx += 1 lowerCamelCase_ = content[api_idx]["sections"] # Then to the model doc lowerCamelCase_ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 lowerCamelCase_ = False lowerCamelCase_ = api_doc[pipeline_idx]["sections"] lowerCamelCase_ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: lowerCamelCase_ = pipeline_doc["section"] lowerCamelCase_ = clean_doc_toc(UpperCAmelCase_ ) if overwrite: lowerCamelCase_ = new_sub_pipeline_doc new_pipeline_docs.append(UpperCAmelCase_ ) # sort overall pipeline doc lowerCamelCase_ = clean_doc_toc(UpperCAmelCase_ ) if new_pipeline_docs != pipeline_docs: lowerCamelCase_ = True if overwrite: lowerCamelCase_ = new_pipeline_docs if diff: if overwrite: lowerCamelCase_ = api_doc with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a_ : int = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
675
1
"""One-off script: initialize a randomly-weighted causal LM from a config and
a pre-trained tokenizer, then save it (optionally pushing to the Hub).

NOTE(review): every assignment below targets the same mangled name ``a_``
while later lines read ``parser``, ``args``, ``tokenizer``, ``config_kwargs``,
``config`` and ``model`` — names that are never bound — and the annotations
reference ``typing`` names without importing them.  The comments describe the
evident intent; restore real names before running.
"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration: parse InitializationArguments from the command line
a_ : Optional[int] = HfArgumentParser(InitializationArguments)
a_ : str = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
a_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks;
# vocab size is taken from the loaded tokenizer so model and tokenizer agree.
a_ : str = {
    """vocab_size""": len(tokenizer),
    """scale_attn_by_inverse_layer_idx""": True,
    """reorder_and_upcast_attn""": True,
}

# Load model config (GPT-2 large in this case) with the overrides above
a_ : Optional[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config — from_config gives random weights (no download)
a_ : Optional[Any] = AutoModelForCausalLM.from_config(config)

# Save model to the hub (push only when the parsed args request it)
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
675
"""Pre-compute per-example token lengths for the train/val splits of a
seq2seq dataset and pickle them to each dataset's ``len_file`` cache.

NOTE(review): identifiers are machine-mangled — every local is assigned to
``lowerCamelCase_`` while later statements read ``tok``, ``ds``, ``dl``,
``max_lens``, ``consider_target``, ``train_ds``, ``val_ds`` (never bound), and
the function's parameters share one name (SyntaxError as-is).  The comments
describe the evident intent; restore real names before running.
"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=1024 , UpperCAmelCase_ : Tuple=1024 , UpperCAmelCase_ : List[Any]=False , **UpperCAmelCase_ : Optional[Any] ):
    # Tokenizer for the given model name; its pad id is used to count real tokens.
    lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
    # Train split of the dataset (extra kwargs forwarded to the dataset ctor).
    lowerCamelCase_ = SeqaSeqDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , type_path="train" , **UpperCAmelCase_ )
    lowerCamelCase_ = tok.pad_token_id

    def get_lens(UpperCAmelCase_ : List[str] ):
        # Iterate the dataset in large batches; tqdm labels progress with the
        # target length-cache filename.
        lowerCamelCase_ = tqdm(
            DataLoader(UpperCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=UpperCAmelCase_ , collate_fn=ds.collate_fn ) ,
            desc=str(ds.len_file ) ,
        )
        lowerCamelCase_ = []
        for batch in dl:
            # Non-pad token counts per example, for source and target.
            lowerCamelCase_ = batch["input_ids"].ne(UpperCAmelCase_ ).sum(1 ).tolist()
            lowerCamelCase_ = batch["labels"].ne(UpperCAmelCase_ ).sum(1 ).tolist()
            if consider_target:
                # Record max(src_len, tgt_len) per example.
                for src, tgt in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
                    max_lens.append(max(UpperCAmelCase_ , UpperCAmelCase_ ) )
            else:
                # Record source lengths only.
                max_lens.extend(UpperCAmelCase_ )
        return max_lens

    # Compute and cache lengths for the train split, then the val split.
    lowerCamelCase_ = get_lens(UpperCAmelCase_ )
    lowerCamelCase_ = SeqaSeqDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , type_path="val" , **UpperCAmelCase_ )
    lowerCamelCase_ = get_lens(UpperCAmelCase_ )
    pickle_save(UpperCAmelCase_ , train_ds.len_file )
    pickle_save(UpperCAmelCase_ , val_ds.len_file )


if __name__ == "__main__":
    fire.Fire(save_len_file)
675
1
"""Count "hybrid integers" p**q * q**p (p < q distinct primes) that do not
exceed base**degree (Project Euler-style problem; defaults target 800800**800800).
"""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below ``max_number`` via a sieve of Eratosthenes.

    Args:
        max_number: exclusive upper bound; values below 2 yield an empty list.
    """
    if max_number < 2:
        # fix: the original crashed for max_number <= 0 (isqrt of a negative)
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # multiples of i starting at i*i are composite
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count prime pairs p < q with p**q * q**p <= base**degree.

    Works in log space: the condition is equivalent to
    q*log2(p) + p*log2(q) <= degree*log2(base), checked with a two-pointer
    sweep over the sorted prime list.

    Returns:
        Number of hybrid integers not exceeding base**degree.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink `right` until the pair (left, right) fits under the bound;
        # every prime strictly between the pointers then also fits.
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
675
"""Keyword (monoalphabetic substitution) cipher.

The keyword's unique letters map to A, B, C, ... and the remaining plaintext
letters map to the unused alphabet letters in order.
"""


def remove_duplicates(key: str) -> str:
    """Return ``key`` with repeated characters dropped.

    Spaces are always kept; other characters are kept only on first
    occurrence and only if alphabetic.
    """
    key_no_dups = ""
    for ch in key:
        # precedence preserved from the original: space OR (new AND alpha)
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build the substitution map A-Z -> cipher letters for ``key``.

    The deduplicated, upper-cased keyword fills the first positions; the rest
    of the alphabet is filled with the letters not used by the keyword, in
    alphabetical order.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encode ``message`` (upper-cased) with ``cipher_map``; unmapped chars pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decode ``message`` by inverting ``cipher_map``; unmapped chars pass through."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive entry point: prompt for message, keyword and direction."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
675
1
"""Solution-concentration and ideal-gas helpers.

Uses the gas constant R = 0.0821 L*atm/(mol*K); every result is rounded to
the nearest integer (matching the original behaviour).
"""


def molarity_to_normality(nfactor: int, moles: float, volume: float) -> int:
    """Normality = molarity (moles / volume) * n-factor, rounded."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> int:
    """Pressure from PV = nRT, i.e. P = nRT / V, rounded."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> int:
    """Volume from PV = nRT, i.e. V = nRT / P, rounded."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> int:
    """Temperature from PV = nRT, i.e. T = PV / (nR), rounded."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
675
"""Unit tests for the OpenAI-GPT tokenizers (slow and fast).

NOTE(review): identifiers are machine-mangled — both classes and all methods
are named ``snake_case``, class attributes all shadow ``_lowerCamelCase``, and
locals are assigned to ``lowerCamelCase_`` while later statements read names
(``tokenizer``, ``tokens``, ``tokenizer_r`` ...) that are never bound.  The
comments describe the evident intent; restore real names before running.
"""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """Tokenizer test suite mixing the common tester into unittest.TestCase.

    NOTE(review): the four attributes below all assign to the same mangled
    name; they presumably configure tokenizer_class / rust_tokenizer_class
    and two boolean flags of the common tester — confirm upstream.
    """

    _lowerCamelCase = OpenAIGPTTokenizer
    _lowerCamelCase = OpenAIGPTTokenizerFast
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """Write a tiny BPE vocab (json) and merges file into the test tmpdir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase_ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        # token -> id mapping in list order
        lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
        lowerCamelCase_ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(UpperCamelCase ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(UpperCamelCase ) )

    def snake_case ( self , UpperCamelCase ):
        """Return a (input, output) text pair usable with the toy vocab above."""
        return "lower newer", "lower newer"

    def snake_case ( self ):
        """Tokenize "lower" with the slow tokenizer and check tokens and ids."""
        lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        lowerCamelCase_ = "lower"
        lowerCamelCase_ = ["low", "er</w>"]
        lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )

        # appended "<unk>" should map to the unk id (20 in the toy vocab)
        lowerCamelCase_ = tokens + ["<unk>"]
        lowerCamelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self , UpperCamelCase=15 ):
        """Check that padding to max_length without a pad token raises, for
        every pretrained fast tokenizer in tokenizers_list (simple and pair
        inputs, encode / encode_plus / batch_encode_plus)."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )

                # Simple input
                lowerCamelCase_ = "This is a simple input"
                lowerCamelCase_ = ["This is a simple input 1", "This is a simple input 2"]
                lowerCamelCase_ = ("This is a simple input", "This is a pair")
                lowerCamelCase_ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

    def snake_case ( self ):
        """Intentionally empty override of an inherited test."""
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( lowercase ):
    """Re-run of the suite above gated on ftfy/spacy being installed (no extra tests)."""

    pass
675
1
"""Processor class that bundles an OwlViT image processor with a CLIP tokenizer.

NOTE(review): identifiers are machine-mangled — class attributes all assign to
``_lowerCamelCase``, locals to ``lowerCamelCase_``, and several signatures
repeat the same parameter name (SyntaxError as-is) while bodies read names
(``text``, ``images``, ``kwargs`` ...) that are never bound.  The comments
describe the evident intent; restore real names before running.
"""
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class snake_case ( lowercase ):
    """Wraps an ``OwlViTImageProcessor`` and a CLIP tokenizer into one processor."""

    # NOTE(review): these presumably set `attributes`, `image_processor_class`
    # and `tokenizer_class` of the ProcessorMixin — confirm upstream.
    _lowerCamelCase = ["image_processor", "tokenizer"]
    _lowerCamelCase = "OwlViTImageProcessor"
    _lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ):
        """Accept (image_processor, tokenizer); supports the deprecated
        `feature_extractor` kwarg as a fallback for the image processor."""
        lowerCamelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , UpperCamelCase , )
            lowerCamelCase_ = kwargs.pop("feature_extractor" )

        # fall back to the deprecated kwarg, then validate both components
        lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(UpperCamelCase , UpperCamelCase )

    def __call__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="max_length" , UpperCamelCase="np" , **UpperCamelCase ):
        """Prepare model inputs from any combination of text queries,
        query images and target images; at least one must be given.

        Text may be a string, a list of strings, or a nested list of string
        queries per image (padded with " " to the max query count and then
        concatenated along the batch axis in the requested tensor framework).
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )):
                # single string or flat list of strings: one encoding
                lowerCamelCase_ = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )]

            elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ):
                lowerCamelCase_ = []

                # Maximum number of queries across batch
                lowerCamelCase_ = max([len(UpperCamelCase ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(UpperCamelCase ) != max_num_queries:
                        lowerCamelCase_ = t + [" "] * (max_num_queries - len(UpperCamelCase ))

                    lowerCamelCase_ = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
                    encodings.append(UpperCamelCase )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )

            # Concatenate the per-sample encodings in the requested framework.
            if return_tensors == "np":
                lowerCamelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowerCamelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowerCamelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                lowerCamelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowerCamelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            else:
                raise ValueError("Target return tensor type could not be returned" )

            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = input_ids
            lowerCamelCase_ = attention_mask

        if query_images is not None:
            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = self.image_processor(
                UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values
            lowerCamelCase_ = query_pixel_values

        if images is not None:
            lowerCamelCase_ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )

        # Merge image pixel values into the text/query encoding when both exist;
        # otherwise return whichever encoding was produced.
        if text is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's post_process."""
        return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )

    @property
    def snake_case ( self ):
        """Deprecated alias for image_processor_class (warns on access)."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase , )
        return self.image_processor_class

    @property
    def snake_case ( self ):
        """Deprecated alias for image_processor (warns on access)."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , )
        return self.image_processor
675
"""Fast tokenization class for RoFormer, backed by HuggingFace's `tokenizers` library."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" RoFormer tokenizer.

    Uses a Jieba-based custom pre-tokenizer at runtime (needed for Chinese word
    segmentation) but swaps a plain ``BertPreTokenizer`` back in for pickling and
    saving, because custom Python pre-tokenizers cannot be serialized by the
    `tokenizers` library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the normalizer if the serialized tokenizer.json disagrees with
        # the requested lowercasing / accent-stripping behavior.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # Custom (Python) pre-tokenizers are not picklable; temporarily replace
        # with a serializable BertPreTokenizer.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        # Restore the Jieba pre-tokenizer that __getstate__ stripped out.
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first sequence (and its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # Swap in a serializable pre-tokenizer before writing tokenizer.json
        # (the Jieba pre-tokenizer cannot be serialized).
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
675
1
"""Unit tests for `create_student_by_copying_alternating_layers` (seq2seq distillation)."""
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        # Teacher reference config used to validate "copy all decoder layers" behavior.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # d=None means "keep all decoder layers"; just verify it runs.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # e=None and d=None together is invalid: at least one size must be given.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
675
"""Tests for the PyTorch UperNet semantic-segmentation model."""
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UperNetModelTester:
    """Builds tiny configs and random inputs for fast UperNet forward-pass tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        # Tiny ConvNext backbone so the forward pass stays fast.
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # UperNet upsamples logits back to the input resolution.
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for UperNet. UperNet does not use input_ids/attention_mask,
    so text-specific tests are skipped or inapplicable.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config properties (hidden_size etc.) do not apply to UperNet.
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        # Zero-init both the head config and the nested backbone config.
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Download a small ADE20k validation image used by the integration tests."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
675
1
"""Tests for the Flax RegNet model."""
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    """Builds tiny configs and random inputs for fast Flax RegNet tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w): the backbone downsamples by a factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests for RegNet (vision model: no embeddings, no attentions)."""

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config properties (hidden_size etc.) do not apply to RegNet.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            # +1 for the stem output preceding the stages.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    """Load the standard COCO cats test image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
675
"""Initialize a fresh CodeParrot causal-LM from a GPT-2 config and optionally push it to the Hub."""
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config (random weights — no pretrained checkpoint is loaded)
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
675
1
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. a_ : Optional[int] = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. a_ : List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. a_ : Tuple = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ): lowerCamelCase_ = len([g for position, g in enumerate(UpperCAmelCase_ ) if g == main_target[position]] ) return (item, float(UpperCAmelCase_ )) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ): lowerCamelCase_ = random.randint(0 , len(UpperCAmelCase_ ) - 1 ) lowerCamelCase_ = parent_a[:random_slice] + parent_a[random_slice:] lowerCamelCase_ = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] ): lowerCamelCase_ = list(UpperCAmelCase_ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCamelCase_ = random.choice(UpperCAmelCase_ ) return "".join(UpperCAmelCase_ ) def __snake_case ( UpperCAmelCase_ : tuple[str, float] , UpperCAmelCase_ : list[tuple[str, float]] , UpperCAmelCase_ : list[str] , ): lowerCamelCase_ = [] # Generate more children proportionally to the fitness score. lowerCamelCase_ = int(parent_a[1] * 100 ) + 1 lowerCamelCase_ = 10 if child_n >= 10 else child_n for _ in range(UpperCAmelCase_ ): lowerCamelCase_ = population_score[random.randint(0 , UpperCAmelCase_ )][0] lowerCamelCase_ ,lowerCamelCase_ = crossover(parent_a[0] , UpperCAmelCase_ ) # Append new string to the population list. 
pop.append(mutate(UpperCAmelCase_ , UpperCAmelCase_ ) ) pop.append(mutate(UpperCAmelCase_ , UpperCAmelCase_ ) ) return pop def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] , UpperCAmelCase_ : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCamelCase_ = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(UpperCAmelCase_ ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCamelCase_ = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCamelCase_ = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(UpperCAmelCase_ ) # Generate random starting population. lowerCamelCase_ = [] for _ in range(UpperCAmelCase_ ): population.append("".join([random.choice(UpperCAmelCase_ ) for i in range(len(UpperCAmelCase_ ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCamelCase_ ,lowerCamelCase_ = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(UpperCAmelCase_ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowerCamelCase_ = [evaluate(UpperCAmelCase_ , UpperCAmelCase_ ) for item in population] # Check if there is a matching evolution. 
lowerCamelCase_ = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : x[1] , reverse=UpperCAmelCase_ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population:{total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowerCamelCase_ = population[: int(N_POPULATION / 3 )] population.clear() population.extend(UpperCAmelCase_ ) # Normalize population score to be between 0 and 1. lowerCamelCase_ = [ (item, score / len(UpperCAmelCase_ )) for item, score in population_score ] # This is selection for i in range(UpperCAmelCase_ ): population.extend(select(population_score[int(UpperCAmelCase_ )] , UpperCAmelCase_ , UpperCAmelCase_ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(UpperCAmelCase_ ) > N_POPULATION: break if __name__ == "__main__": a_ : Optional[Any] = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) a_ : Optional[Any] = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) a_ , a_ , a_ : Optional[Any] = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
675
'''simple docstring'''
# Lint as: python3
# NOTE(review): identifiers in this module were mechanically mangled — the
# class is named `snake_case`, every method `snake_case`, locals are all
# rebound to `lowerCamelCase_`, and several signatures repeat the parameter
# name `UpperCamelCase` (a SyntaxError in Python).  Bodies also read names
# (`dataset_name`, `data_url`, `dummy_data_dict`, ...) that the mangled
# signatures no longer bind.  The comments below describe apparent intent
# only; the code itself is left byte-for-byte untouched.
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version

# Module-level logger (name mangled from something like `logger`).
a_ : List[Any] = get_logger(__name__)


class snake_case:
    """Mock download manager that serves test "dummy data" archives.

    It mirrors a download manager's surface (download/extract aliases,
    ``iter_archive``, ``iter_files``) but resolves every URL to a path inside
    a local or GitHub-hosted ``dummy_data.zip`` instead of downloading.
    """

    # NOTE(review): these were three distinct class attributes before
    # mangling (dummy-data folder name, scripts dir name, a bool flag);
    # as written they repeatedly rebind the same name.
    _lowerCamelCase = "dummy_data"
    _lowerCamelCase = "datasets"
    _lowerCamelCase = False

    def __init__(self, UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase=None, UpperCamelCase=False, UpperCamelCase=True, UpperCamelCase=None, ):
        """Store dataset name, cache dir, config and dummy-data flags."""
        lowerCamelCase_ = 0
        lowerCamelCase_ = dataset_name
        lowerCamelCase_ = cache_dir
        lowerCamelCase_ = use_local_dummy_data
        lowerCamelCase_ = config
        # download_callbacks take a single url as input
        lowerCamelCase_ = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowerCamelCase_ = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowerCamelCase_ = str(UpperCamelCase)
        # to be downloaded
        lowerCamelCase_ = None
        lowerCamelCase_ = None

    @property
    def snake_case(self):
        """Lazily resolve the dummy-data path once and cache it."""
        if self._dummy_file is None:
            lowerCamelCase_ = self.download_dummy_data()
        return self._dummy_file

    @property
    def snake_case(self):
        """Relative dummy-data folder: dummy[/<config_name>]/<version_name>."""
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def snake_case(self):
        """Path of ``dummy_data.zip`` inside the dummy-data folder."""
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def snake_case(self):
        """Fetch the dummy archive (local path or GitHub URL) and extract it."""
        lowerCamelCase_ = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowerCamelCase_ = cached_path(
            UpperCamelCase, cache_dir=self.cache_dir, extract_compressed_file=UpperCamelCase, force_extract=UpperCamelCase
        )
        return os.path.join(UpperCamelCase, self.dummy_file_name)

    @property
    def snake_case(self):
        """Dummy zip location inside a local datasets-scripts checkout."""
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def snake_case(self):
        """GitHub URL of the dummy zip, computed once and cached."""
        if self._bucket_url is None:
            lowerCamelCase_ = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def snake_case(self):
        """Directory that contains the extracted dummy file."""
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def snake_case(self, UpperCamelCase, *UpperCamelCase):
        """Map a real download URL (str / list / dict) to dummy-data paths."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowerCamelCase_ = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowerCamelCase_ = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(UpperCamelCase, UpperCamelCase):
            return self.create_dummy_data_dict(UpperCamelCase, UpperCamelCase)
        elif isinstance(UpperCamelCase, (list, tuple)):
            return self.create_dummy_data_list(UpperCamelCase, UpperCamelCase)
        else:
            return self.create_dummy_data_single(UpperCamelCase, UpperCamelCase)

    def snake_case(self, UpperCamelCase, *UpperCamelCase):
        """Download alias — delegates to download_and_extract."""
        return self.download_and_extract(UpperCamelCase)

    def snake_case(self, UpperCamelCase, UpperCamelCase):
        """Extract alias — delegates to download_and_extract."""
        return self.download_and_extract(UpperCamelCase)

    def snake_case(self, UpperCamelCase, *UpperCamelCase, **UpperCamelCase):
        """Pass-through kept for interface parity.

        NOTE(review): returns unbound name `path` (mangling artifact).
        """
        return path

    def snake_case(self):
        """Return an empty mapping (no recorded sizes/checksums for dummy data)."""
        return {}

    def snake_case(self, UpperCamelCase, UpperCamelCase):
        """Build {key: dummy path} from a dict of URLs, de-duplicating values."""
        lowerCamelCase_ = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(UpperCamelCase, UpperCamelCase):
                    for single_url in single_urls:
                        download_callback(UpperCamelCase)
                else:
                    lowerCamelCase_ = single_urls
                    download_callback(UpperCamelCase)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(UpperCamelCase, UpperCamelCase):
                lowerCamelCase_ = [os.path.join(UpperCamelCase, urllib.parse.quote_plus(Path(UpperCamelCase).name)) for x in single_urls]
            else:
                lowerCamelCase_ = single_urls
                lowerCamelCase_ = os.path.join(UpperCamelCase, urllib.parse.quote_plus(Path(UpperCamelCase).name))
            lowerCamelCase_ = value
        # make sure that values are unique
        if all(isinstance(UpperCamelCase, UpperCamelCase) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def snake_case(self, UpperCamelCase, UpperCamelCase):
        """Build a list of dummy paths from a list of URLs (sharded sets collapse to one)."""
        lowerCamelCase_ = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", UpperCamelCase)) for url in data_url)
        lowerCamelCase_ = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(UpperCamelCase)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowerCamelCase_ = os.path.join(UpperCamelCase, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(UpperCamelCase)
        return dummy_data_list

    def snake_case(self, UpperCamelCase, UpperCamelCase):
        """Resolve a single URL to its dummy path (with legacy zip fallback)."""
        for download_callback in self.download_callbacks:
            download_callback(UpperCamelCase)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowerCamelCase_ = os.path.join(UpperCamelCase, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(UpperCamelCase) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def snake_case(self):
        """No-op (download-manager interface stub)."""
        pass

    def snake_case(self):
        """No-op (download-manager interface stub)."""
        pass

    def snake_case(self, UpperCamelCase):
        """Yield (relative posix name, open file handle) pairs for archive members."""
        def _iter_archive_members(UpperCamelCase):
            # this preserves the order of the members inside the ZIP archive
            lowerCamelCase_ = Path(self.dummy_file).parent
            lowerCamelCase_ = path.relative_to(UpperCamelCase)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                lowerCamelCase_ = zip_file.namelist()
                for member in members:
                    if member.startswith(relative_path.as_posix()):
                        yield dummy_parent_path.joinpath(UpperCamelCase)

        lowerCamelCase_ = Path(UpperCamelCase)
        lowerCamelCase_ = _iter_archive_members(UpperCamelCase) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            # skip hidden/dunder files such as .DS_Store or __MACOSX
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(UpperCamelCase).as_posix(), file_path.open("rb")

    def snake_case(self, UpperCamelCase):
        """Yield file paths under the given path(s), skipping hidden/dunder entries."""
        if not isinstance(UpperCamelCase, UpperCamelCase):
            lowerCamelCase_ = [paths]
        for path in paths:
            if os.path.isfile(UpperCamelCase):
                # NOTE(review): `return` (not `continue`) aborts the whole
                # generator on the first hidden file — preserved as-is.
                if os.path.basename(UpperCamelCase).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(UpperCamelCase):
                    if os.path.basename(UpperCamelCase).startswith((".", "__")):
                        continue
                    dirnames.sort()  # deterministic traversal order
                    for filename in sorted(UpperCamelCase):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(UpperCamelCase, UpperCamelCase)
675
1
'''simple docstring'''
# NOTE(review): pytest conftest-style fixtures for a Hugging Face CI hub.
# Identifiers were mechanically mangled: every fixture is named `__snake_case`
# (later defs shadow earlier ones), constants were all rebound to `a_` (so the
# references to CI_HUB_ENDPOINT / CI_HUB_USER / CI_HUB_USER_TOKEN below are
# unresolved), several signatures repeat `UpperCAmelCase_` (a SyntaxError),
# and typing names (Optional, List, ...) are used without import.  One URL
# template also contains the literal "(unknown)" where a path placeholder
# presumably stood.  Comments describe apparent intent; code is untouched.
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder

# CI hub constants: dummy user, display name, token, endpoint, URL templates,
# and the path where the CI token is stored on disk.
a_ : Optional[int] = """__DUMMY_TRANSFORMERS_USER__"""
a_ : Optional[Any] = """Dummy User"""
a_ : Any = """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"""
a_ : Optional[int] = """https://hub-ci.huggingface.co"""
a_ : Any = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
a_ : Optional[int] = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/(unknown)"""
a_ : List[Any] = Path("""~/.huggingface/hub_ci_token""").expanduser()


@pytest.fixture
def __snake_case(UpperCAmelCase_: str):
    # Point huggingface_hub's download URL template at the CI hub.
    # NOTE(review): `monkeypatch` is read but not bound by the mangled signature.
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCAmelCase_
    )


@pytest.fixture
def __snake_case(UpperCAmelCase_: str):
    # Point the datasets library's endpoint config at the CI hub.
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCAmelCase_)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCAmelCase_)


@pytest.fixture
def __snake_case(UpperCAmelCase_: str):
    # Redirect HfFolder's token path so tests never touch the real user token.
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCAmelCase_)


@pytest.fixture
def __snake_case(UpperCAmelCase_: List[str], UpperCAmelCase_: Optional[int]):
    # Install the CI token for the duration of a test, then delete it.
    HfFolder.save_token(UpperCAmelCase_)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def __snake_case():
    # Session-wide HfApi client bound to the CI endpoint.
    return HfApi(endpoint=UpperCAmelCase_)


@pytest.fixture(scope="session")
def __snake_case(UpperCAmelCase_: HfApi):
    # Swap in the CI user token for the session; restore any previous token after.
    lowerCamelCase_ = HfFolder.get_token()
    HfFolder.save_token(UpperCAmelCase_)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(UpperCAmelCase_)


@pytest.fixture
def __snake_case(UpperCAmelCase_: str):
    # Factory: returns a helper that deletes a dataset repo on the hub.
    def _cleanup_repo(UpperCAmelCase_: Tuple):
        hf_api.delete_repo(UpperCAmelCase_, token=UpperCAmelCase_, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def __snake_case(UpperCAmelCase_: Union[str, Any]):
    # Context manager wrapping a repo id so it is always cleaned up on exit.
    @contextmanager
    def _temporary_repo(UpperCAmelCase_: Dict):
        try:
            yield repo_id
        finally:
            cleanup_repo(UpperCAmelCase_)

    return _temporary_repo


@pytest.fixture(scope="session")
def __snake_case(UpperCAmelCase_: HfApi, UpperCAmelCase_: int, UpperCAmelCase_: List[Any]):
    # Create a private dataset repo with data/text_data.txt; yield its id;
    # best-effort delete afterwards.
    lowerCamelCase_ = f'''repo_txt_data-{int(time.time() * 1_0E3)}'''
    lowerCamelCase_ = f'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(UpperCAmelCase_, token=UpperCAmelCase_, repo_type="dataset", private=UpperCAmelCase_)
    hf_api.upload_file(
        token=UpperCAmelCase_,
        path_or_fileobj=str(UpperCAmelCase_),
        path_in_repo="data/text_data.txt",
        repo_id=UpperCAmelCase_,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(UpperCAmelCase_, token=UpperCAmelCase_, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):
        # catch http error and token invalid error
        pass


@pytest.fixture()
def __snake_case(UpperCAmelCase_: Tuple, UpperCAmelCase_: Optional[Any], UpperCAmelCase_: int):
    # Per-test alias of the session-scoped text-data repo fixture.
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def __snake_case(UpperCAmelCase_: HfApi, UpperCAmelCase_: Union[str, Any], UpperCAmelCase_: List[str]):
    # As above, but uploads a zipped text file as data.zip.
    lowerCamelCase_ = f'''repo_zipped_txt_data-{int(time.time() * 1_0E3)}'''
    lowerCamelCase_ = f'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(UpperCAmelCase_, token=UpperCAmelCase_, repo_type="dataset", private=UpperCAmelCase_)
    hf_api.upload_file(
        token=UpperCAmelCase_,
        path_or_fileobj=str(UpperCAmelCase_),
        path_in_repo="data.zip",
        repo_id=UpperCAmelCase_,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(UpperCAmelCase_, token=UpperCAmelCase_, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):
        # catch http error and token invalid error
        pass


@pytest.fixture()
def __snake_case(UpperCAmelCase_: int, UpperCAmelCase_: List[str], UpperCAmelCase_: List[Any]):
    # Per-test alias of the session-scoped zipped-text repo fixture.
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def __snake_case(UpperCAmelCase_: HfApi, UpperCAmelCase_: List[Any], UpperCAmelCase_: Optional[Any]):
    # As above, but the data.zip holds image data.
    lowerCamelCase_ = f'''repo_zipped_img_data-{int(time.time() * 1_0E3)}'''
    lowerCamelCase_ = f'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(UpperCAmelCase_, token=UpperCAmelCase_, repo_type="dataset", private=UpperCAmelCase_)
    hf_api.upload_file(
        token=UpperCAmelCase_,
        path_or_fileobj=str(UpperCAmelCase_),
        path_in_repo="data.zip",
        repo_id=UpperCAmelCase_,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(UpperCAmelCase_, token=UpperCAmelCase_, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):
        # catch http error and token invalid error
        pass


@pytest.fixture()
def __snake_case(UpperCAmelCase_: Any, UpperCAmelCase_: int, UpperCAmelCase_: List[Any]):
    # Per-test alias of the session-scoped zipped-image repo fixture.
    return hf_private_dataset_repo_zipped_img_data_
675
'''simple docstring'''
import os


def __snake_case(UpperCAmelCase_: str = "matrix.txt") -> int:
    """Project Euler 81: minimal path sum through a grid moving only right/down.

    Reads a comma-separated integer grid from ``UpperCAmelCase_`` (the path is
    joined with its own directory component, so absolute paths work unchanged
    and bare filenames resolve against the current directory, matching the
    original lookup) and returns the minimal top-left -> bottom-right sum.

    Fixes over the previous version:
      * results were assigned to a throwaway local while the body read unbound
        names (``data``, ``grid``, ``dp``) — it raised NameError when called;
      * the DP table was allocated twice (one allocation was dead);
      * generalized to rectangular grids (the old code used ``len(grid[0])``
        for both dimensions, silently assuming a square matrix);
      * handles an empty grid by returning 0;
      * the ``__main__`` guard called undefined ``solution()``.

    Raises:
        OSError: if the file cannot be opened.
        ValueError: if a cell is not an integer literal.
    """
    with open(os.path.join(os.path.dirname(UpperCAmelCase_), UpperCAmelCase_)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    if not grid or not grid[0]:
        return 0

    rows, cols = len(grid), len(grid[0])
    # dp[i][j] = minimal path sum that ends at cell (i, j).
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    # First row/column have a single incoming direction.
    for j in range(1, cols):
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    for i in range(1, rows):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    # Interior cells take the cheaper of the two predecessors (above / left).
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{__snake_case() = }")
675
1
'''simple docstring'''
# NOTE(review): Kandinsky 2.2 ControlNet diffusers pipeline with mechanically
# mangled identifiers: the class and all methods are named `snake_case`,
# locals are rebound to `lowerCamelCase_`, and signatures repeat
# `UpperCamelCase` (a SyntaxError).  Bodies read names the signatures no
# longer bind (`height`, `latents`, `gpu_id`, ...).  Comments describe
# apparent intent; code is left untouched.
from typing import List, Optional, Union

import torch

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)

a_ : Union[str, Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example spliced into __call__'s docstring via @replace_example_docstring.
a_ : Tuple = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)[\"depth\"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline(\"depth-estimation\")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to(\"cuda\")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to(\"cuda\")

        >>> img = load_image(
        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
        ...     \"/kandinsky/cat.png\"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")

        >>> prompt = \"A robot, 4k photo\"
        >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"

        >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save(\"robot_cat.png\")
        ```
"""


def __snake_case(UpperCAmelCase_: str, UpperCAmelCase_: Optional[int], UpperCAmelCase_: Any = 8):
    """Round (height, width) up to the next multiple of scale_factor**2, then
    rescale — used to size the latent tensor.

    NOTE(review): the mangled body increments `new_height`/`new_width` before
    they are bound; pre-mangling this computed new dimensions from
    (height, width, scale_factor).
    """
    lowerCamelCase_ = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    lowerCamelCase_ = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class snake_case(lowercase):
    """Kandinsky 2.2 ControlNet pipeline: UNet + DDPM scheduler + MoVQ decoder."""

    def __init__(self, UpperCamelCase, UpperCamelCase, UpperCamelCase, ):
        """Register the unet/scheduler/movq sub-modules and derive the MoVQ scale factor."""
        super().__init__()
        self.register_modules(
            unet=UpperCamelCase,
            scheduler=UpperCamelCase,
            movq=UpperCamelCase,
        )
        # Spatial downscale factor implied by the number of MoVQ down blocks.
        lowerCamelCase_ = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def snake_case(self, UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase):
        """Create (or validate and move) initial latents, scaled by the scheduler's init sigma."""
        if latents is None:
            lowerCamelCase_ = randn_tensor(UpperCamelCase, generator=UpperCamelCase, device=UpperCamelCase, dtype=UpperCamelCase)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            lowerCamelCase_ = latents.to(UpperCamelCase)
        lowerCamelCase_ = latents * scheduler.init_noise_sigma
        return latents

    def snake_case(self, UpperCamelCase=0):
        """Sequential CPU offload of unet and movq via accelerate's cpu_offload."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        lowerCamelCase_ = torch.device(f'''cuda:{gpu_id}''')
        lowerCamelCase_ = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase, UpperCamelCase)

    def snake_case(self, UpperCamelCase=0):
        """Model-level CPU offload (requires accelerate >= 0.17); keeps the final hook."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        lowerCamelCase_ = torch.device(f'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=UpperCamelCase)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        lowerCamelCase_ = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            lowerCamelCase_, lowerCamelCase_ = cpu_offload_with_hook(UpperCamelCase, UpperCamelCase, prev_module_hook=UpperCamelCase)
        # We'll offload the last model manually.
        lowerCamelCase_ = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def snake_case(self):
        """Device the unet's accelerate hooks will actually execute on."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCamelCase, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCamelCase)
    def __call__(self, UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase=512, UpperCamelCase=512, UpperCamelCase=100, UpperCamelCase=4.0, UpperCamelCase=1, UpperCamelCase=None, UpperCamelCase=None, UpperCamelCase="pil", UpperCamelCase=True, ):
        """Run the guided denoising loop over image embeddings + hint and decode via MoVQ."""
        lowerCamelCase_ = self._execution_device
        lowerCamelCase_ = guidance_scale > 1.0
        # Lists of embeddings / hints are concatenated into one batch tensor.
        if isinstance(UpperCamelCase, UpperCamelCase):
            lowerCamelCase_ = torch.cat(UpperCamelCase, dim=0)
        if isinstance(UpperCamelCase, UpperCamelCase):
            lowerCamelCase_ = torch.cat(UpperCamelCase, dim=0)
        if isinstance(UpperCamelCase, UpperCamelCase):
            lowerCamelCase_ = torch.cat(UpperCamelCase, dim=0)
        lowerCamelCase_ = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate per requested image, then stack negative+positive halves.
            lowerCamelCase_ = image_embeds.repeat_interleave(UpperCamelCase, dim=0)
            lowerCamelCase_ = negative_image_embeds.repeat_interleave(UpperCamelCase, dim=0)
            lowerCamelCase_ = hint.repeat_interleave(UpperCamelCase, dim=0)
            lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=UpperCamelCase)
            lowerCamelCase_ = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=UpperCamelCase)
        self.scheduler.set_timesteps(UpperCamelCase, device=UpperCamelCase)
        lowerCamelCase_ = self.scheduler.timesteps
        lowerCamelCase_ = self.movq.config.latent_channels
        lowerCamelCase_, lowerCamelCase_ = downscale_height_and_width(UpperCamelCase, UpperCamelCase, self.movq_scale_factor)
        # create initial latent
        lowerCamelCase_ = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, UpperCamelCase, UpperCamelCase, UpperCamelCase, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(UpperCamelCase)):
            # expand the latents if we are doing classifier free guidance
            lowerCamelCase_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            lowerCamelCase_ = {"image_embeds": image_embeds, "hint": hint}
            lowerCamelCase_ = self.unet(
                sample=UpperCamelCase,
                timestep=UpperCamelCase,
                encoder_hidden_states=UpperCamelCase,
                added_cond_kwargs=UpperCamelCase,
                return_dict=UpperCamelCase,
            )[0]
            if do_classifier_free_guidance:
                # Split noise/variance, recombine uncond+text with the guidance scale.
                lowerCamelCase_, lowerCamelCase_ = noise_pred.split(latents.shape[1], dim=1)
                lowerCamelCase_, lowerCamelCase_ = noise_pred.chunk(2)
                lowerCamelCase_, lowerCamelCase_ = variance_pred.chunk(2)
                lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowerCamelCase_ = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowerCamelCase_, lowerCamelCase_ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            lowerCamelCase_ = self.scheduler.step(
                UpperCamelCase, UpperCamelCase, UpperCamelCase,
                generator=UpperCamelCase,
            )[0]
        # post-processing: decode latents, then map to [0, 1] numpy / PIL if asked.
        lowerCamelCase_ = self.movq.decode(UpperCamelCase, force_not_quantize=UpperCamelCase)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            lowerCamelCase_ = image * 0.5 + 0.5
            lowerCamelCase_ = image.clamp(0, 1)
            lowerCamelCase_ = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            lowerCamelCase_ = self.numpy_to_pil(UpperCamelCase)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCamelCase)
'''simple docstring'''
# NOTE(review): accelerate launcher smoke tests with mangled identifiers —
# every test method is named `snake_case` (later defs shadow earlier ones in
# the class namespace), and locals rebound to `lowerCamelCase_` leave later
# reads (`mod_file`, `self.test_file_path`, `UpperCamelCase`) unresolved.
# Comments describe apparent intent; code is untouched.
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class snake_case(unittest.TestCase):
    """Launches accelerate's external-deps metrics script on CPU / 1 GPU / multi-GPU."""

    def snake_case(self):
        """Locate scripts/external_deps/test_metrics.py inside accelerate.test_utils and import it."""
        lowerCamelCase_ = inspect.getfile(accelerate.test_utils)
        lowerCamelCase_ = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        lowerCamelCase_ = test_metrics

    @require_cpu
    def snake_case(self):
        """Run the metrics script in-process with a single CPU process."""
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def snake_case(self):
        """Run the metrics script in-process with the default number of processes."""
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def snake_case(self):
        """Run the metrics script directly on a single GPU."""
        self.test_metrics.main()

    @require_multi_gpu
    def snake_case(self):
        """Launch the metrics script via torchrun across all visible GPUs."""
        print(f'''Found {torch.cuda.device_count()} devices.''')
        lowerCamelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        # Pin OMP threads to avoid oversubscription when spawning one process per GPU.
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(UpperCamelCase, env=os.environ.copy())
675
1
'''simple docstring'''
# NOTE(review): SQuAD training-argument dataclass + torch Dataset with
# mechanically mangled identifiers: all three classes are named `snake_case`,
# every dataclass field rebinds `_lowerCamelCase`, signatures repeat
# `UpperCamelCase` (a SyntaxError), and the module references undefined names
# (`MODEL_CONFIG_CLASSES`, `lowercase`, the duplicated `SquadVaProcessor`
# import).  Comments describe apparent intent; code is untouched.
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features

a_ : int = logging.get_logger(__name__)
# Config classes / model types that support question answering.
a_ : Dict = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class snake_case:
    """Command-line arguments controlling SQuAD feature conversion and caching."""

    # model type (must be one of the QA-capable model types above)
    _lowerCamelCase = field(
        default=lowercase, metadata={"help": "Model type selected in the list: " + ", ".join(lowercase)}
    )
    # directory containing the SQuAD .json files
    _lowerCamelCase = field(
        default=lowercase, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    # max total input length after tokenization (longer truncated, shorter padded)
    _lowerCamelCase = field(
        default=1_28,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    # stride between document chunks
    _lowerCamelCase = field(
        default=1_28,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    # max question length in tokens
    _lowerCamelCase = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    # max generated answer length
    _lowerCamelCase = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    # overwrite cached feature files
    _lowerCamelCase = field(
        default=lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    # SQuAD v2 mode (examples may be unanswerable)
    _lowerCamelCase = field(
        default=lowercase, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    # null-score threshold
    _lowerCamelCase = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    # n-best size (help string repeats the previous field's text in the original)
    _lowerCamelCase = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    # language id for language-specific XLM models
    _lowerCamelCase = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    # number of worker threads for feature conversion
    _lowerCamelCase = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class snake_case(lowercase):
    """Dataset split names (train / dev)."""

    _lowerCamelCase = "train"
    _lowerCamelCase = "dev"


class snake_case(lowercase):
    """Torch Dataset of SQuAD features with FileLock-guarded on-disk caching."""

    _lowerCamelCase = 42
    _lowerCamelCase = 42
    _lowerCamelCase = 42
    _lowerCamelCase = 42

    def __init__(self, UpperCamelCase, UpperCamelCase, UpperCamelCase=None, UpperCamelCase=Split.train, UpperCamelCase=False, UpperCamelCase=None, UpperCamelCase="pt", ):
        """Load features from cache when present, else convert examples and cache them."""
        lowerCamelCase_ = args
        lowerCamelCase_ = is_language_sensitive
        lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(UpperCamelCase, UpperCamelCase):
            try:
                lowerCamelCase_ = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        lowerCamelCase_ = mode
        # Load data features from cache or dataset file
        lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
        lowerCamelCase_ = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''',
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lowerCamelCase_ = cached_features_file + ".lock"
        with FileLock(UpperCamelCase):
            if os.path.exists(UpperCamelCase) and not args.overwrite_cache:
                lowerCamelCase_ = time.time()
                lowerCamelCase_ = torch.load(UpperCamelCase)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                lowerCamelCase_ = self.old_features["features"]
                lowerCamelCase_ = self.old_features.get("dataset", UpperCamelCase)
                lowerCamelCase_ = self.old_features.get("examples", UpperCamelCase)
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir)
                else:
                    lowerCamelCase_ = self.processor.get_train_examples(args.data_dir)
                lowerCamelCase_, lowerCamelCase_ = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=UpperCamelCase,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=UpperCamelCase,
                )
                lowerCamelCase_ = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    UpperCamelCase,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]'''
                )

    def __len__(self):
        """Number of cached features."""
        return len(self.features)

    def __getitem__(self, UpperCamelCase):
        """Build the model-input dict of tensors for the i-th feature."""
        # Convert to Tensors and build dataset
        lowerCamelCase_ = self.features[i]
        lowerCamelCase_ = torch.tensor(feature.input_ids, dtype=torch.long)
        lowerCamelCase_ = torch.tensor(feature.attention_mask, dtype=torch.long)
        lowerCamelCase_ = torch.tensor(feature.token_type_ids, dtype=torch.long)
        lowerCamelCase_ = torch.tensor(feature.cls_index, dtype=torch.long)
        lowerCamelCase_ = torch.tensor(feature.p_mask, dtype=torch.float)
        lowerCamelCase_ = torch.tensor(feature.is_impossible, dtype=torch.float)
        lowerCamelCase_ = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        # Some architectures don't use token_type_ids; others need extra fields.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.intaa) * self.args.lang_id)})
        # Answer positions are only present for the training split.
        if self.mode == Split.train:
            lowerCamelCase_ = torch.tensor(feature.start_position, dtype=torch.long)
            lowerCamelCase_ = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
'''simple docstring'''
# NOTE(review): ROUGE metric tests with mangled identifiers — every test
# function is named `__snake_case` (later defs shadow earlier ones), the two
# fixture lists were both rebound to `a_`, and calls pass the unbound name
# `UpperCAmelCase_` where the summaries/references/flags once stood.
# Comments describe apparent intent; code is untouched.
from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge

# Reference summaries (targets).
a_ : Any = [
    """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
    """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
    """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
    """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
    """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
    """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
    """ body.""",
    """Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
    """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
    """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
    """ punishment.""",
]

# Candidate summaries (predictions).
a_ : Optional[Any] = [
    """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
    """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
    """ had informed his Lufthansa training school of an episode of severe depression, airline says .""",
    """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
    """ Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
    """ Israelis .""",
    """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
    """ death . Organization claims that governments around the world are using the threat of terrorism to advance"""
    """ executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
    """ sentences up by 28% .""",
]


def __snake_case():
    # Aggregated and per-example ROUGE should agree on the mean rouge2 F-measure.
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, bootstrap_aggregation=UpperCAmelCase_, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(UpperCAmelCase_, UpperCAmelCase_)
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, bootstrap_aggregation=UpperCAmelCase_, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def __snake_case():
    # rougeLsum with newline separation should score higher than without.
    lowerCamelCase_ = "rougeLsum"
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=UpperCAmelCase_, rouge_keys=[k])[k]
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=UpperCAmelCase_, rouge_keys=[k])[k]
    assert score > score_no_sep


def __snake_case():
    # Newline separation must not change rouge1/rouge2/rougeL scores.
    lowerCamelCase_ = ["rouge1", "rouge2", "rougeL"]
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=UpperCAmelCase_, rouge_keys=UpperCAmelCase_)
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=UpperCAmelCase_, rouge_keys=UpperCAmelCase_)
    assert score_sep == score_no_sep


def __snake_case():
    # Inputs without newlines: separation flag must be a no-op.
    lowerCamelCase_ = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    lowerCamelCase_ = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=UpperCAmelCase_) == calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, newline_sep=UpperCAmelCase_)


def __snake_case():
    # Pegasus-style <n> separators: rougeLsum should improve after handling them.
    lowerCamelCase_ = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    lowerCamelCase_ = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, rouge_keys=["rougeLsum"], newline_sep=UpperCAmelCase_)["rougeLsum"]
    lowerCamelCase_ = calculate_rouge(UpperCAmelCase_, UpperCAmelCase_, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def __snake_case():
    # End-to-end: calculate_rouge_path on the WMT en-ro test files,
    # both aggregated and per-example.
    lowerCamelCase_ = Path("examples/seq2seq/test_data/wmt_en_ro")
    lowerCamelCase_ = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(UpperCAmelCase_, UpperCAmelCase_)
    lowerCamelCase_ = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=UpperCAmelCase_
    )
    assert isinstance(UpperCAmelCase_, UpperCAmelCase_)
1
'''simple docstring'''
# NOTE(review): this file appears to be a mechanically renamed copy of a
# fairseq-SEW -> transformers checkpoint converter.  Assignment targets were
# rewritten to `lowerCamelCase_` / `a_` while later reads keep the original
# names (`key`, `MAPPING`, `fairseq_dict`, `logger`, ...), and every `def`
# repeats the parameter name `UpperCAmelCase_`, which is a SyntaxError in
# Python.  The code is documented as-is; it cannot run in this form --
# confirm against the upstream converter before restoring it.
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)

logging.set_verbosity_info()
# Module-level logger (read later as `logger` -- obfuscation artifact).
a_ : Optional[Any] = logging.get_logger(__name__)

# fairseq -> transformers weight-name substring mapping; `*` stands for a
# layer index.  Read later as `MAPPING` (obfuscation artifact).
a_ : List[Any] = {
    """post_extract_proj""": """feature_projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.upsample.0""": """encoder.upsample.projection""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """layer_norm""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}


# Copy one tensor (`value`) into the HF model attribute addressed by the
# dotted `key`, after a shape check; `weight_type` selects the slot
# (weight / weight_g / weight_v / bias / whole pointer).
# NOTE(review): parameters were presumably (hf_model, key, value, full_name,
# weight_type) before obfuscation -- the repeated name is a SyntaxError.
def __snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any ):
    # Walk key dot-by-dot down the module tree (result read as `hf_pointer`).
    for attribute in key.split("." ):
        lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
    if weight_type is not None:
        lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
    else:
        lowerCamelCase_ = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        lowerCamelCase_ = value
    elif weight_type == "weight_g":
        lowerCamelCase_ = value
    elif weight_type == "weight_v":
        lowerCamelCase_ = value
    elif weight_type == "bias":
        lowerCamelCase_ = value
    else:
        lowerCamelCase_ = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )


# Walk the fairseq state dict: conv-feature-extractor tensors go through
# load_conv_layer, everything else is matched against MAPPING and copied via
# set_recursively; anything unmatched is reported as unused.
def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : str ):
    lowerCamelCase_ = []
    lowerCamelCase_ = fairseq_model.state_dict()
    # Fine-tuned checkpoints wrap the encoder under `.sew`.
    lowerCamelCase_ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        lowerCamelCase_ = False
        if "conv_layers" in name:
            load_conv_layer(
                UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == "group" , )
            lowerCamelCase_ = True
        else:
            for key, mapped_key in MAPPING.items():
                lowerCamelCase_ = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    lowerCamelCase_ = True
                    if "*" in mapped_key:
                        # Substitute the layer index extracted from the fairseq name.
                        lowerCamelCase_ = name.split(UpperCAmelCase_ )[0].split("." )[-2]
                        lowerCamelCase_ = mapped_key.replace("*" , UpperCAmelCase_ )
                    if "weight_g" in name:
                        lowerCamelCase_ = "weight_g"
                    elif "weight_v" in name:
                        lowerCamelCase_ = "weight_v"
                    elif "weight" in name:
                        lowerCamelCase_ = "weight"
                    elif "bias" in name:
                        lowerCamelCase_ = "bias"
                    else:
                        lowerCamelCase_ = None
                    set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
                continue
        if not is_used:
            unused_weights.append(UpperCAmelCase_ )
    logger.warning(F'''Unused weights: {unused_weights}''' )


# Load a single conv-feature-extractor tensor.  The fairseq name encodes
# `<layer_id>.<type_id>...`: type 0 is the convolution itself, type 2 a
# (layer/group) norm; everything else is collected as unused.
def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] ):
    lowerCamelCase_ = full_name.split("conv_layers." )[-1]
    lowerCamelCase_ = name.split("." )
    lowerCamelCase_ = int(items[0] )
    lowerCamelCase_ = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            lowerCamelCase_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            lowerCamelCase_ = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # NOTE(review): the message below indexes `feature_extractor[layer_id]`
            # (missing `.conv_layers`) -- looks like a latent bug in the error text.
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            lowerCamelCase_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            lowerCamelCase_ = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(UpperCAmelCase_ )


# Translate a fairseq model config into a SEWConfig.
# NOTE(review): `eval` on `fs_config.conv_feature_layers` executes text from
# the checkpoint -- acceptable only for trusted checkpoints.
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] ):
    lowerCamelCase_ = SEWConfig()
    if is_finetuned:
        lowerCamelCase_ = model.wav_encoder.wav_model.cfg
    else:
        lowerCamelCase_ = model.cfg
    lowerCamelCase_ = fs_config.conv_bias
    lowerCamelCase_ = eval(fs_config.conv_feature_layers )
    lowerCamelCase_ = [x[0] for x in conv_layers]
    lowerCamelCase_ = [x[1] for x in conv_layers]
    lowerCamelCase_ = [x[2] for x in conv_layers]
    lowerCamelCase_ = "gelu"
    lowerCamelCase_ = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    lowerCamelCase_ = 0.0
    lowerCamelCase_ = fs_config.activation_fn.name
    lowerCamelCase_ = fs_config.encoder_embed_dim
    lowerCamelCase_ = 0.02
    lowerCamelCase_ = fs_config.encoder_ffn_embed_dim
    lowerCamelCase_ = 1E-5
    lowerCamelCase_ = fs_config.encoder_layerdrop
    lowerCamelCase_ = fs_config.encoder_attention_heads
    lowerCamelCase_ = fs_config.conv_pos_groups
    lowerCamelCase_ = fs_config.conv_pos
    lowerCamelCase_ = len(UpperCAmelCase_ )
    lowerCamelCase_ = fs_config.encoder_layers
    lowerCamelCase_ = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        lowerCamelCase_ = model.cfg
        lowerCamelCase_ = fs_config.final_dropout
        lowerCamelCase_ = fs_config.layerdrop
        lowerCamelCase_ = fs_config.activation_dropout
        lowerCamelCase_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        lowerCamelCase_ = fs_config.attention_dropout
        lowerCamelCase_ = fs_config.dropout_input
        lowerCamelCase_ = fs_config.dropout
        lowerCamelCase_ = fs_config.mask_channel_length
        lowerCamelCase_ = fs_config.mask_channel_prob
        lowerCamelCase_ = fs_config.mask_length
        lowerCamelCase_ = fs_config.mask_prob
    lowerCamelCase_ = "Wav2Vec2FeatureExtractor"
    lowerCamelCase_ = "Wav2Vec2CTCTokenizer"
    return config


# End-to-end conversion entry point: load the fairseq checkpoint, build the
# config, (optionally) write tokenizer/processor for fine-tuned CTC models,
# copy the weights, and save the HF model.
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=True ):
    if is_finetuned:
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        lowerCamelCase_ = SEWConfig.from_pretrained(UpperCAmelCase_ )
    else:
        lowerCamelCase_ = convert_config(model[0] , UpperCAmelCase_ )
    lowerCamelCase_ = model[0].eval()
    lowerCamelCase_ = True if config.feat_extract_norm == "layer" else False
    lowerCamelCase_ = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
    if is_finetuned:
        if dict_path:
            lowerCamelCase_ = Dictionary.load(UpperCAmelCase_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowerCamelCase_ = target_dict.pad_index
            lowerCamelCase_ = target_dict.bos_index
            lowerCamelCase_ = target_dict.pad_index
            lowerCamelCase_ = target_dict.bos_index
            lowerCamelCase_ = target_dict.eos_index
            lowerCamelCase_ = len(target_dict.symbols )
            lowerCamelCase_ = os.path.join(UpperCAmelCase_ , "vocab.json" )
            if not os.path.isdir(UpperCAmelCase_ ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase_ ) )
                return
            os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
            with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , UpperCAmelCase_ )
            lowerCamelCase_ = WavaVecaCTCTokenizer(
                UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCAmelCase_ , )
            lowerCamelCase_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ )
            processor.save_pretrained(UpperCAmelCase_ )
        lowerCamelCase_ = SEWForCTC(UpperCAmelCase_ )
    else:
        lowerCamelCase_ = SEWModel(UpperCAmelCase_ )
    feature_extractor.save_pretrained(UpperCAmelCase_ )
    recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    hf_model.save_pretrained(UpperCAmelCase_ )


if __name__ == "__main__":
    # CLI wrapper (parser assigned to `a_` but read as `parser`/`args` below
    # -- obfuscation artifact).
    a_ : Tuple = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    a_ : Any = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
675
'''simple docstring'''
# NOTE(review): mechanically renamed copy of an EnCodec -> transformers
# checkpoint converter.  All the MAPPING_* dicts below are assigned to `a_`
# but read back under their original names, and the `def`s repeat the
# parameter name `UpperCAmelCase_` (a SyntaxError).  Documented as-is; it
# cannot run in this form.
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)

# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th

logging.set_verbosity_info()
# Module-level logger (read later as `logger`).
a_ : Optional[Any] = logging.get_logger("""transformers.models.encodec""")

# Residual-vector-quantizer codebook renames (read later as MAPPING_QUANTIZER).
a_ : List[str] = {
    """quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
    """quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
    """quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
    """quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
# Encoder conv/lstm renames (read later as MAPPING_ENCODER).
a_ : Optional[int] = {
    """encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
    """encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
    """encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
    """encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
    """encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
    """encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
    """encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
    """encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
    """encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
    """encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
    """encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
    """encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
    """encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
    """encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
    """encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
    """encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
    """encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
    """encoder.model.13.lstm""": """encoder.layers.13.lstm""",
    """encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
# Extra encoder norm renames used only by the 48 kHz model (MAPPING_ENCODER_48K).
a_ : Tuple = {
    """encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
    """encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
    """encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
    """encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
    """encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
    """encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
    """encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
    """encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
    """encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
    """encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
    """encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
    """encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
    """encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
    """encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
    """encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
    """encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
    """encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
    """encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
# Decoder conv/lstm renames (MAPPING_DECODER).
a_ : Union[str, Any] = {
    """decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
    """decoder.model.1.lstm""": """decoder.layers.1.lstm""",
    """decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
    """decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
    """decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
    """decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
    """decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
    """decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
    """decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
    """decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
    """decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
    """decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
    """decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
    """decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
    """decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
    """decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
    """decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
    """decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
    """decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
# Extra decoder norm renames used only by the 48 kHz model (MAPPING_DECODER_48K).
a_ : Union[str, Any] = {
    """decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
    """decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
    """decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
    """decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
    """decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
    """decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
    """decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
    """decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
    """decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
    """decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
    """decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
    """decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
    """decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
    """decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
    """decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
    """decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
    """decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
    """decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
# Combined mapping for the 24/32 kHz models (read later as MAPPING_24K).
a_ : Optional[Any] = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
# Combined mapping for the 48 kHz model (read later as MAPPING_48K).
a_ : List[str] = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
# Read later as TOP_LEVEL_KEYS / IGNORE_KEYS (both empty here).
a_ : Any = []
a_ : str = []


# Copy one tensor (`value`) into the HF model attribute addressed by the
# dotted `key`, with a shape check; `weight_type` selects the slot,
# including BN running stats and the per-layer LSTM parameters.
def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ):
    for attribute in key.split("." ):
        lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
    if weight_type is not None:
        lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
    else:
        lowerCamelCase_ = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}'''
        )
    if weight_type == "weight":
        lowerCamelCase_ = value
    elif weight_type == "weight_g":
        lowerCamelCase_ = value
    elif weight_type == "weight_v":
        lowerCamelCase_ = value
    elif weight_type == "bias":
        lowerCamelCase_ = value
    elif weight_type == "running_mean":
        lowerCamelCase_ = value
    elif weight_type == "running_var":
        lowerCamelCase_ = value
    elif weight_type == "num_batches_tracked":
        lowerCamelCase_ = value
    elif weight_type == "weight_ih_l0":
        lowerCamelCase_ = value
    elif weight_type == "weight_hh_l0":
        lowerCamelCase_ = value
    elif weight_type == "bias_ih_l0":
        lowerCamelCase_ = value
    elif weight_type == "bias_hh_l0":
        lowerCamelCase_ = value
    elif weight_type == "weight_ih_l1":
        lowerCamelCase_ = value
    elif weight_type == "weight_hh_l1":
        lowerCamelCase_ = value
    elif weight_type == "bias_ih_l1":
        lowerCamelCase_ = value
    elif weight_type == "bias_hh_l1":
        lowerCamelCase_ = value
    else:
        lowerCamelCase_ = value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )


# True if `name` matches any pattern in `ignore_keys` (supports a trailing
# `.*` prefix-wildcard and an infix `.*.` prefix/suffix match).
def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ):
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


# Walk the original state dict and copy every matched tensor into the HF
# model via set_recursively; unmatched names are reported as unused.
def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ):
    lowerCamelCase_ = []
    # NOTE(review): `or "encodec_32khz"` is always truthy, so this branch is
    # taken for every model name and the 48 kHz mapping is unreachable here.
    if model_name == "encodec_24khz" or "encodec_32khz":
        lowerCamelCase_ = MAPPING_24K
    elif model_name == "encodec_48khz":
        lowerCamelCase_ = MAPPING_48K
    else:
        raise ValueError(F'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(UpperCAmelCase_ , UpperCAmelCase_ ):
            logger.info(F'''{name} was ignored''' )
            continue
        lowerCamelCase_ = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." )
                if prefix in name and suffix in name:
                    lowerCamelCase_ = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed" ) and name.endswith("embed_avg" ):
                    continue
                lowerCamelCase_ = True
                if "*" in mapped_key:
                    lowerCamelCase_ = name.split(UpperCAmelCase_ )[0].split("." )[-2]
                    lowerCamelCase_ = mapped_key.replace("*" , UpperCAmelCase_ )
                if "weight_g" in name:
                    lowerCamelCase_ = "weight_g"
                elif "weight_v" in name:
                    lowerCamelCase_ = "weight_v"
                elif "weight_ih_l0" in name:
                    lowerCamelCase_ = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    lowerCamelCase_ = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    lowerCamelCase_ = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    lowerCamelCase_ = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    lowerCamelCase_ = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    lowerCamelCase_ = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    lowerCamelCase_ = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    lowerCamelCase_ = "bias_hh_l1"
                elif "bias" in name:
                    lowerCamelCase_ = "bias"
                elif "weight" in name:
                    lowerCamelCase_ = "weight"
                elif "running_mean" in name:
                    lowerCamelCase_ = "running_mean"
                elif "running_var" in name:
                    lowerCamelCase_ = "running_var"
                elif "num_batches_tracked" in name:
                    lowerCamelCase_ = "num_batches_tracked"
                else:
                    lowerCamelCase_ = None
                set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
            continue
        if not is_used:
            unused_weights.append(UpperCAmelCase_ )
    logger.warning(F'''Unused weights: {unused_weights}''' )


# Conversion entry point: build/patch the EncodecConfig per model variant,
# load the original checkpoint, copy the weights, save (and optionally push)
# the HF model and feature extractor.
@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , ):
    if config_path is not None:
        lowerCamelCase_ = EncodecConfig.from_pretrained(UpperCAmelCase_ )
    else:
        lowerCamelCase_ = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        lowerCamelCase_ = [8, 5, 4, 4]
        lowerCamelCase_ = [2.2]
        lowerCamelCase_ = 64
        lowerCamelCase_ = 32000
        lowerCamelCase_ = 2048
        lowerCamelCase_ = False
        lowerCamelCase_ = False
        lowerCamelCase_ = False
    elif model_name == "encodec_48khz":
        lowerCamelCase_ = [8, 5, 4, 2]
        lowerCamelCase_ = [3.0, 6.0, 12.0, 24.0]
        lowerCamelCase_ = 48000
        lowerCamelCase_ = 2
        lowerCamelCase_ = False
        lowerCamelCase_ = "time_group_norm"
        lowerCamelCase_ = True
        lowerCamelCase_ = 1.0
        lowerCamelCase_ = 0.01
    else:
        raise ValueError(F'''Unknown model name: {model_name}''' )
    lowerCamelCase_ = EncodecModel(UpperCAmelCase_ )
    lowerCamelCase_ = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(UpperCAmelCase_ )
    lowerCamelCase_ = torch.load(UpperCAmelCase_ )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        lowerCamelCase_ = original_checkpoint["best_state"]
    recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    model.save_pretrained(UpperCAmelCase_ )
    if repo_id:
        print("Pushing to the hub..." )
        feature_extractor.push_to_hub(UpperCAmelCase_ )
        model.push_to_hub(UpperCAmelCase_ )


if __name__ == "__main__":
    # CLI wrapper (parser assigned to `a_` but read as `parser`/`args`).
    a_ : Dict = argparse.ArgumentParser()
    parser.add_argument(
        """--model""",
        default="""encodec_24khz""",
        type=str,
        help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
    )
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    a_ : str = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
675
1
'''simple docstring'''
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class snake_case ( unittest.TestCase ):
    """Runs accelerate's bundled ``external_deps/test_metrics.py`` script under
    several device configurations (CPU debug launcher, single GPU, and a
    multi-GPU ``torchrun`` launch).

    Fixes over the previous revision: ``setUp`` now actually binds
    ``mod_file`` / ``self.test_file_path`` / ``self.test_metrics`` (they were
    assigned to a throwaway name, so every method raised ``NameError``), the
    five methods no longer share one name (only the last survived and none
    started with ``test_`` so unittest never collected them), and the
    multi-GPU method passes its command list instead of an undefined name.
    """

    def setUp(self):
        # Locate test_metrics.py relative to the installed accelerate.test_utils package.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metrics_cpu_noop(self):
        # Single-process CPU run through the debug launcher.
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metrics_cpu_multi(self):
        # Default (multi-process) CPU run through the debug launcher.
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metrics_single_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metrics_multi_gpu(self):
        print(f'''Found {torch.cuda.device_count()} devices.''')
        cmd = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        # omp_num_threads=1 keeps each torchrun worker from oversubscribing CPU threads.
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
675
'''simple docstring'''
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class snake_case ( AbstractDatasetReader ):
    """Dataset reader that builds a ``Dataset`` from a PySpark ``DataFrame``
    via the packaged ``Spark`` builder.

    Fixes over the previous revision: the base class referenced an undefined
    name (``lowercase``) and ``__init__`` declared nine parameters all named
    ``UpperCamelCase`` — a SyntaxError — while never binding the attributes
    the read method uses (``self._load_from_cache_file``,
    ``self._file_format``, ``self.builder``).  The signature below restores
    the parameter names/defaults implied by the body (matching the upstream
    ``datasets`` Spark reader).
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def snake_case ( self ):
        """Return the dataset: a streaming view when ``streaming`` was
        requested, otherwise download/prepare the cache (forcing a rebuild
        when ``load_from_cache_file`` is False) and materialize it."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
675
1
'''simple docstring'''
# Lazy-import scaffold for the ViT-MSN model family, following the standard
# transformers `__init__.py` pattern: type checkers see the real imports,
# while at runtime the module is replaced by a _LazyModule that defers the
# heavy torch imports until first attribute access.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Import structure: submodule name -> public names it provides.
# NOTE(review): obfuscation artifact -- this dict is assigned to `a_`, but
# the _LazyModule call below reads `_import_structure`, which is never
# defined here; confirm against the upstream transformers file.
a_ : Union[str, Any] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is missing: the modeling classes are simply not registered.
    pass
else:
    # NOTE(review): upstream this list is stored under
    # _import_structure["modeling_vit_msn"]; here it only rebinds `a_`.
    a_ : Tuple = [
        """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMSNModel""",
        """ViTMSNForImageClassification""",
        """ViTMSNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static-analysis branch: real imports so IDEs/type checkers resolve names.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Runtime branch: swap this module for a lazy proxy in sys.modules.
    a_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
'''simple docstring'''


def solution():
    """Return how many Sundays fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000) -- Project Euler problem 19.

    Walks the century Sunday-by-Sunday (stepping 7 days at a time) and counts
    the Sundays that land on day 1 of a month.

    Fixes over the previous revision: every local was assigned to a throwaway
    name (`lowerCamelCase_`) while the loop read `days_per_month` / `day` /
    `month` / `year` / `sundays` (NameError), and the `__main__` guard called
    `solution()`, which was never defined.

    Returns:
        int: the number of first-of-month Sundays (171).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # 6 Jan 1901 was the first Sunday of the century (1 Jan 1901 fell on a Tuesday).
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7  # jump from one Sunday to the next
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


# Backward-compatible alias for the previous (obfuscated) public name.
__snake_case = solution

if __name__ == "__main__":
    print(solution())
675
1
"""Tests for AutoTokenizer resolution, registration, and remote-code loading.

NOTE(review): this file is obfuscation-damaged. Every method is named
`snake_case` (so only the last definition survives on the class), every local
is assigned to `lowerCamelCase_`, and the bare name `UpperCamelCase` is used
where the original code referenced a local variable or testing constant
(e.g. `model_name`, `SMALL_MODEL_IDENTIFIER`, `tmp_dir`, `True`/`False`).
Restore names against upstream transformers before running.
"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest
import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPTaTokenizer,
    GPTaTokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)

# Make the repo-level `utils` directory importable for the custom test modules.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class snake_case ( unittest.TestCase ):
    """AutoTokenizer behavior tests (resolution, registration, remote code)."""

    def snake_case ( self ):
        """setUp-equivalent; originally initialized a counter attribute."""
        lowerCamelCase_ = 0

    @slow
    def snake_case ( self ):
        """Loads every BERT/GPT-2 hub checkpoint and checks tokenizer class + non-empty vocab."""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )
            self.assertIsInstance(UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(UpperCamelCase ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )
            self.assertIsInstance(UpperCamelCase , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(UpperCamelCase ) , 0 )

    def snake_case ( self ):
        """Tiny BERT model resolves to a Bert tokenizer with vocab_size 12."""
        lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
        self.assertIsInstance(UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def snake_case ( self ):
        """Dummy identifier resolves to a Roberta tokenizer with vocab_size 20."""
        lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
        self.assertIsInstance(UpperCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )

    def snake_case ( self ):
        """Passing an explicit config overrides model_type-based tokenizer selection."""
        lowerCamelCase_ = AutoConfig.from_pretrained(UpperCamelCase )
        self.assertIsInstance(UpperCamelCase , UpperCamelCase )
        # Check that tokenizer_type ≠ model_type
        lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , config=UpperCamelCase )
        self.assertIsInstance(UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )

    def snake_case ( self ):
        """tokenizer_type= selects the slow tokenizer class from local vocab files."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(UpperCamelCase , "vocab.txt" ) )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , tokenizer_type="bert" , use_fast=UpperCamelCase )
            self.assertIsInstance(UpperCamelCase , UpperCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCamelCase , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCamelCase , "merges.txt" ) )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , tokenizer_type="gpt2" , use_fast=UpperCamelCase )
            self.assertIsInstance(UpperCamelCase , UpperCamelCase )

    @require_tokenizers
    def snake_case ( self ):
        """tokenizer_type= selects the fast tokenizer class from local vocab files."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(UpperCamelCase , "vocab.txt" ) )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , tokenizer_type="bert" )
            self.assertIsInstance(UpperCamelCase , UpperCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json" , os.path.join(UpperCamelCase , "vocab.json" ) )
            shutil.copy("./tests/fixtures/merges.txt" , os.path.join(UpperCamelCase , "merges.txt" ) )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , tokenizer_type="gpt2" )
            self.assertIsInstance(UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """An unknown tokenizer_type raises."""
        with pytest.raises(UpperCamelCase ):
            AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )

    @require_tokenizers
    def snake_case ( self ):
        """Model-specific tokenizer_config values (do_lower_case, model_max_length) are honored."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            lowerCamelCase_ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
            self.assertIsInstance(UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
            if isinstance(UpperCamelCase , UpperCamelCase ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase )
            else:
                self.assertEqual(tokenizer.do_lower_case , UpperCamelCase )
            self.assertEqual(tokenizer.model_max_length , 512 )

    @require_tokenizers
    def snake_case ( self ):
        """A nonexistent repo id raises with a helpful message."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                UpperCamelCase ,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" ,
            ):
                lowerCamelCase_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )

    def snake_case ( self ):
        """Every tokenizer class registered in TOKENIZER_MAPPING is findable by name."""
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        lowerCamelCase_ = TOKENIZER_MAPPING.values()
        lowerCamelCase_ = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(UpperCamelCase )

    @require_tokenizers
    def snake_case ( self ):
        """use_fast toggles between slow and fast tokenizer classes."""
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=UpperCamelCase ) , UpperCamelCase )
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , UpperCamelCase )

    @require_tokenizers
    def snake_case ( self ):
        """do_lower_case=False keeps cased tokens out of an uncased vocab ([UNK])."""
        lowerCamelCase_ = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=UpperCamelCase )
        lowerCamelCase_ = "Hello, world. How are you?"
        lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
        self.assertEqual("[UNK]" , tokens[0] )
        lowerCamelCase_ = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=UpperCamelCase )
        lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
        self.assertEqual("[UNK]" , tokens[0] )

    @require_tokenizers
    def snake_case ( self ):
        """A tokenizer.json-only repo loads as PreTrainedTokenizerFast with expected attributes."""
        lowerCamelCase_ = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
        self.assertEqual(type(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 3_0000 )
        self.assertEqual(tokenizer.unk_token , "[UNK]" )
        self.assertEqual(tokenizer.padding_side , "right" )
        self.assertEqual(tokenizer.truncation_side , "right" )

    def snake_case ( self ):
        """save_pretrained / from_pretrained round-trips class and vocab."""
        lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
        self.assertIsInstance(UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(UpperCamelCase )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
        self.assertIsInstance(UpperCamelCase , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )

    def snake_case ( self ):
        """CTRL only ships a slow tokenizer."""
        lowerCamelCase_ = AutoTokenizer.from_pretrained("ctrl" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """get_tokenizer_config behavior for hub models, configless models, and saved tokenizers."""
        # Check we can load the tokenizer config of an online model.
        lowerCamelCase_ = get_tokenizer_config("bert-base-cased" )
        lowerCamelCase_ = config.pop("_commit_hash" , UpperCamelCase )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(UpperCamelCase , {"do_lower_case": False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        lowerCamelCase_ = get_tokenizer_config(UpperCamelCase )
        self.assertDictEqual(UpperCamelCase , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(UpperCamelCase )
            lowerCamelCase_ = get_tokenizer_config(UpperCamelCase )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )

    def snake_case ( self ):
        """Registering a custom config + slow tokenizer makes it loadable via AutoTokenizer."""
        try:
            AutoConfig.register("custom" , UpperCamelCase )
            AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCamelCase ):
                AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
            lowerCamelCase_ = CustomTokenizer.from_pretrained(UpperCamelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(UpperCamelCase )
                lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
                self.assertIsInstance(UpperCamelCase , UpperCamelCase )
        finally:
            # Always undo the registrations so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def snake_case ( self ):
        """Slow/fast tokenizers can be registered separately or together."""
        try:
            AutoConfig.register("custom" , UpperCamelCase )
            # Can register in two steps
            AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(UpperCamelCase , fast_tokenizer_class=UpperCamelCase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                UpperCamelCase , slow_tokenizer_class=UpperCamelCase , fast_tokenizer_class=UpperCamelCase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCamelCase ):
                AutoTokenizer.register(UpperCamelCase , fast_tokenizer_class=UpperCamelCase )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                lowerCamelCase_ = BertTokenizerFast.from_pretrained(UpperCamelCase )
                bert_tokenizer.save_pretrained(UpperCamelCase )
                lowerCamelCase_ = CustomTokenizerFast.from_pretrained(UpperCamelCase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(UpperCamelCase )
                lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase )
                self.assertIsInstance(UpperCamelCase , UpperCamelCase )
                lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , use_fast=UpperCamelCase )
                self.assertIsInstance(UpperCamelCase , UpperCamelCase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def snake_case ( self ):
        """Remote-code tokenizers require trust_remote_code and survive save/reload."""
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(UpperCamelCase ):
            lowerCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCamelCase ):
            lowerCamelCase_ = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase )
        lowerCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(UpperCamelCase )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , trust_remote_code=UpperCamelCase )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            lowerCamelCase_ = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase , use_fast=UpperCamelCase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(UpperCamelCase )
                lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , trust_remote_code=UpperCamelCase , use_fast=UpperCamelCase )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )

    @require_tokenizers
    def snake_case ( self ):
        """Local registered classes win over hub remote code unless trust_remote_code=True."""
        # NOTE(review): the two nested classes below lost their original names/bases
        # (likely NewTokenizer(BertTokenizer) and NewTokenizerFast(BertTokenizerFast));
        # `NewTokenizer` is referenced but never defined here — confirm against upstream.
        class snake_case ( lowercase ):
            _lowerCamelCase = False

        class snake_case ( lowercase ):
            _lowerCamelCase = NewTokenizer
            _lowerCamelCase = False

        try:
            AutoConfig.register("custom" , UpperCamelCase )
            AutoTokenizer.register(UpperCamelCase , slow_tokenizer_class=UpperCamelCase )
            AutoTokenizer.register(UpperCamelCase , fast_tokenizer_class=UpperCamelCase )
            # If remote code is not set, the default is to use local
            lowerCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            lowerCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=UpperCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            lowerCamelCase_ = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertFalse(tokenizer.special_attribute_present )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase , use_fast=UpperCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            lowerCamelCase_ = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            self.assertTrue(tokenizer.special_attribute_present )
            lowerCamelCase_ = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=UpperCamelCase , use_fast=UpperCamelCase )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def snake_case ( self ):
        """Legacy-format remote-code repos still load (slow and fast)."""
        lowerCamelCase_ = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            lowerCamelCase_ = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=UpperCamelCase , use_fast=UpperCamelCase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )

    def snake_case ( self ):
        """An invalid repo id produces a descriptive error."""
        with self.assertRaisesRegex(
            UpperCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
            lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base" )

    def snake_case ( self ):
        """An invalid revision produces a descriptive error."""
        with self.assertRaisesRegex(
            UpperCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase , revision="aaaaaa" )

    def snake_case ( self ):
        """A cached tokenizer reload performs only a HEAD request."""
        # Make sure we have cached the tokenizer.
        lowerCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        with RequestCounter() as counter:
            lowerCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            self.assertEqual(counter.get_request_count , 0 )
            self.assertEqual(counter.head_request_count , 1 )
            self.assertEqual(counter.other_request_count , 0 )
675
"""Deformable DETR model configuration (obfuscated copy).

NOTE(review): the `__init__` signature below repeats the parameter name
`UpperCamelCase`, which is a SyntaxError in Python — the original keyword
names (use_timm_backbone, backbone_config, num_channels, ...) must be
restored from upstream transformers before this file can even be imported.
The intended names are recoverable from the assignment order in the body.
"""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


a_ : Optional[int] = logging.get_logger(__name__)

# Map from checkpoint id to its hosted config.json.
a_ : Dict = {
    """SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class snake_case ( lowercase ):
    """Configuration class holding all Deformable DETR hyperparameters."""

    # NOTE(review): both class attributes were renamed to `_lowerCamelCase`;
    # originally `model_type` and `attribute_map` — the second silently
    # overwrites the first here.
    _lowerCamelCase = "deformable_detr"
    _lowerCamelCase = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=3 , UpperCamelCase=300 , UpperCamelCase=1024 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=256 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1.0 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="sine" , UpperCamelCase="resnet50" , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=False , UpperCamelCase=300 , UpperCamelCase=False , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=0.1 , UpperCamelCase=0.25 , UpperCamelCase=False , **UpperCamelCase , ):
        """Validate backbone options, resolve the backbone config, and store all hyperparameters."""
        # timm backbones and explicit backbone configs are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                # Default to a ResNet backbone exposing only its last stage.
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                lowerCamelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(UpperCamelCase , UpperCamelCase ):
                # A dict backbone_config is re-hydrated into the proper config class.
                lowerCamelCase_ = backbone_config.get("model_type" )
                lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type]
                lowerCamelCase_ = config_class.from_dict(UpperCamelCase )
        lowerCamelCase_ = use_timm_backbone
        lowerCamelCase_ = backbone_config
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = num_queries
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = d_model
        lowerCamelCase_ = encoder_ffn_dim
        lowerCamelCase_ = encoder_layers
        lowerCamelCase_ = encoder_attention_heads
        lowerCamelCase_ = decoder_ffn_dim
        lowerCamelCase_ = decoder_layers
        lowerCamelCase_ = decoder_attention_heads
        lowerCamelCase_ = dropout
        lowerCamelCase_ = attention_dropout
        lowerCamelCase_ = activation_dropout
        lowerCamelCase_ = activation_function
        lowerCamelCase_ = init_std
        lowerCamelCase_ = init_xavier_std
        lowerCamelCase_ = encoder_layerdrop
        lowerCamelCase_ = auxiliary_loss
        lowerCamelCase_ = position_embedding_type
        lowerCamelCase_ = backbone
        lowerCamelCase_ = use_pretrained_backbone
        lowerCamelCase_ = dilation
        # deformable attributes
        lowerCamelCase_ = num_feature_levels
        lowerCamelCase_ = encoder_n_points
        lowerCamelCase_ = decoder_n_points
        lowerCamelCase_ = two_stage
        lowerCamelCase_ = two_stage_num_proposals
        lowerCamelCase_ = with_box_refine
        # Box refinement is a prerequisite of the two-stage variant.
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        lowerCamelCase_ = class_cost
        lowerCamelCase_ = bbox_cost
        lowerCamelCase_ = giou_cost
        # Loss coefficients
        lowerCamelCase_ = mask_loss_coefficient
        lowerCamelCase_ = dice_loss_coefficient
        lowerCamelCase_ = bbox_loss_coefficient
        lowerCamelCase_ = giou_loss_coefficient
        lowerCamelCase_ = eos_coefficient
        lowerCamelCase_ = focal_alpha
        lowerCamelCase_ = disable_custom_kernels
        super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )

    @property
    def snake_case ( self ):
        """Alias: num_attention_heads maps onto encoder_attention_heads."""
        return self.encoder_attention_heads

    @property
    def snake_case ( self ):
        """Alias: hidden_size maps onto d_model."""
        return self.d_model

    def snake_case ( self ):
        """Serialize to a plain dict, expanding the nested backbone config."""
        lowerCamelCase_ = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            lowerCamelCase_ = self.backbone_config.to_dict()
        lowerCamelCase_ = self.__class__.model_type
        return output
675
1
"""Lightweight copyable configuration dataclass (obfuscated copy).

NOTE(review): all field names were obfuscated to `_lowerCamelCase`, so the
repeated class-level assignments overwrite one another; additionally the
attributes carry no type annotations, so `@dataclass` registers no fields.
Restore the original field names/annotations from upstream when known.
"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class snake_case :
    """Configuration holder that supports deep copying via `snake_case()`."""

    _lowerCamelCase = None
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = None
    _lowerCamelCase = None
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = True
    _lowerCamelCase = None
    _lowerCamelCase = 1
    _lowerCamelCase = None
    _lowerCamelCase = False
    _lowerCamelCase = None
    _lowerCamelCase = None

    def snake_case ( self ):
        """Return a deep copy of this config.

        Builds a new instance of the same class from a deep copy of every
        instance attribute, so mutating the copy never affects the original.

        Bug fix: the obfuscated source deep-copied the undefined name
        `UpperCamelCase` (NameError at call time); the dict *value* `v`
        is what must be copied.
        """
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
675
"""Convert a PyTorch Lightning Longformer QA checkpoint to a Hugging Face model.

NOTE(review): obfuscation damage — the converter function repeats the
parameter name `UpperCAmelCase_` three times (a SyntaxError), every local is
assigned to `lowerCamelCase_` while the body reads the original names
(`lightning_model`, `ckpt`, `longformer_for_qa`, ...), and the `__main__`
block calls `convert_longformer_qa_checkpoint_to_pytorch`, which is the
function's original (now lost) name. Restore names from upstream before use.
"""
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class snake_case ( pl.LightningModule ):
    """Minimal LightningModule wrapper: a Longformer body plus a 2-label QA head."""

    def __init__( self , UpperCamelCase ):
        super().__init__()
        lowerCamelCase_ = model
        # QA has two labels: answer start and answer end logits.
        lowerCamelCase_ = 2
        lowerCamelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def snake_case ( self ):
        """Unused forward placeholder — only the weights are needed for conversion."""
        pass


def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
    """Load the Lightning checkpoint, transfer weights into a
    LongformerForQuestionAnswering, and save it with save_pretrained."""
    # load longformer model from model identifier
    lowerCamelCase_ = LongformerModel.from_pretrained(UpperCAmelCase_ )
    lowerCamelCase_ = LightningModel(UpperCAmelCase_ )
    # Checkpoint is loaded on CPU so conversion works without a GPU.
    lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    lowerCamelCase_ = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase_ )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(UpperCAmelCase_ )
    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )


if __name__ == "__main__":
    a_ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--longformer_model""",
        default=None,
        type=str,
        required=True,
        help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
    )
    parser.add_argument(
        """--longformer_question_answering_ckpt_path""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch Lightning Checkpoint.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    a_ : Tuple = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
675
1
"""Sum of Euler's totient function phi(i) for 2 <= i <= limit (Project Euler 72 style)."""


def __snake_case ( UpperCAmelCase_ : int = 1000000 ):
    """Return sum(phi(i) for i in 2..UpperCAmelCase_) using a linear totient sieve.

    The sieve initializes phi[i] = i - 1; whenever phi[i] is still i - 1 when
    `i` is reached, `i` is prime, and phi[j] for every multiple j is reduced by
    phi[j] // i (equivalent to multiplying by (1 - 1/i)).

    Bug fixes vs. the obfuscated original:
    - the body referenced the undefined names `limit` and `phi` (the
      parameter was renamed to `UpperCAmelCase_` and the sieve list to
      `lowerCamelCase_`), which raised NameError on first call;
    - the inner range stepped by `UpperCAmelCase_` (the limit) instead of
      the prime `i`, so only 2*i would ever have been visited.
    """
    limit = UpperCAmelCase_
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime: phi[i] untouched by smaller primes
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )


# Public alias: the __main__ guard (and the original upstream name) use `solution`.
solution = __snake_case


if __name__ == "__main__":
    print(solution())
675
"""Lazy-import init module for the CTRL model family.

Declares the public import structure, guards the torch/TF sub-modules behind
availability checks, and defers actual imports to `_LazyModule` unless type
checking is running.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base structure: config and slow tokenizer are always importable.
a_ : Optional[Any] = {
    """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
    """tokenization_ctrl""": ["""CTRLTokenizer"""],
}

# PyTorch model classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : str = [
        """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CTRLForSequenceClassification""",
        """CTRLLMHeadModel""",
        """CTRLModel""",
        """CTRLPreTrainedModel""",
    ]

# TensorFlow model classes are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCTRLForSequenceClassification""",
        """TFCTRLLMHeadModel""",
        """TFCTRLModel""",
        """TFCTRLPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime these are lazy.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports sub-modules on access.
    # NOTE(review): obfuscation renamed the module-level variables to `a_`;
    # only the final `_import_structure` dict is actually forwarded here —
    # originally these were all updates to one `_import_structure` mapping.
    a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
1
"""Tests for the OpenAI GPT tokenizer (slow and fast).

NOTE(review): obfuscation damage — all methods are named `snake_case` (only
the last definition survives), locals are `lowerCamelCase_`, and the bare
name `UpperCamelCase` stands in for the intended local (e.g. `vocab_tokens`,
`max_length`, `input_text`). Restore names from upstream before running.
"""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """Tokenizer test-mixin subclass wired to the OpenAI GPT tokenizers."""

    _lowerCamelCase = OpenAIGPTTokenizer
    _lowerCamelCase = OpenAIGPTTokenizerFast
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """Write a tiny BPE vocab and merges file into the test tmpdir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase_ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
        lowerCamelCase_ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(UpperCamelCase ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(UpperCamelCase ) )

    def snake_case ( self , UpperCamelCase ):
        """Provide identical input/output sample texts for round-trip tests."""
        return "lower newer", "lower newer"

    def snake_case ( self ):
        """BPE-tokenize 'lower' and check tokens and their ids against the tiny vocab."""
        lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        lowerCamelCase_ = "lower"
        lowerCamelCase_ = ["low", "er</w>"]
        lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )

        lowerCamelCase_ = tokens + ["<unk>"]
        lowerCamelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self , UpperCamelCase=15 ):
        """Padding to max_length must raise for tokenizers without a pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )

                # Simple input
                lowerCamelCase_ = "This is a simple input"
                lowerCamelCase_ = ["This is a simple input 1", "This is a simple input 2"]
                lowerCamelCase_ = ("This is a simple input", "This is a pair")
                lowerCamelCase_ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

    def snake_case ( self ):
        """Intentionally skipped test (inherited behavior not applicable)."""
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( lowercase ):
    """Re-run of the suite above with ftfy/spacy-based pre-tokenization available."""

    pass
675
"""Tests for the GPT-SW3 (GPTSwa) sentencepiece tokenizer.

NOTE(review): obfuscation damage — all methods are named `snake_case`,
locals are `lowerCamelCase_`, and `UpperCamelCase` stands in for the intended
local or the SAMPLE_VOCAB fixture path. Restore names from upstream.
"""
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# SentencePiece fixture with byte-fallback enabled.
a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")


@require_sentencepiece
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """Tokenizer test-mixin subclass wired to GPTSwaTokenizer."""

    _lowerCamelCase = GPTSwaTokenizer
    _lowerCamelCase = False
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """Build a tokenizer from the sentencepiece fixture and save it to the tmpdir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )

        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self , UpperCamelCase ):
        """Provide identical input/output sample texts for round-trip tests."""
        lowerCamelCase_ = "This is a test"
        lowerCamelCase_ = "This is a test"
        return input_text, output_text

    def snake_case ( self ):
        """Token <-> id conversion for the '<s>' token (id 1)."""
        lowerCamelCase_ = "<s>"
        lowerCamelCase_ = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self ):
        """First/last vocab entries and total vocab size of the fixture."""
        lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(UpperCamelCase ) , 2000 )

    def snake_case ( self ):
        """vocab_size matches the fixture."""
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )

    def snake_case ( self ):
        """Full tokenize/ids round trip, including byte-fallback pieces like <0xC3><0xA9>."""
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )

        lowerCamelCase_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [465, 287, 265, 631, 842] )

        lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
        # fmt: on
        lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
        self.assertListEqual(
            UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )

        lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def snake_case ( self ):
        """encode_fast matches tokenize+convert, and decode_fast round-trips the text."""
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )
        lowerCamelCase_ = ["This is a test", "I was born in 92000, and this is falsé."]
        lowerCamelCase_ = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ):
            self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase )

        # Test that decode_fast returns the input text
        for text, token_ids in zip(UpperCamelCase , UpperCamelCase ):
            self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase )

    @slow
    def snake_case ( self ):
        """Integration check of full encodings against the hosted gpt-sw3-126m model."""
        lowerCamelCase_ = [
            "<|python|>def fibonacci(n)\n    if n < 0:\n        print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot     .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCamelCase , )
675
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Tuple = logging.get_logger(__name__) a_ : str = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = "sew-d" def __init__( self , UpperCamelCase=32 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=12 , UpperCamelCase=3072 , UpperCamelCase=2 , UpperCamelCase=512 , UpperCamelCase=256 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=("p2c", "c2p") , UpperCamelCase="layer_norm" , UpperCamelCase="gelu_python" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase=0.02 , UpperCamelCase=1e-7 , UpperCamelCase=1e-5 , UpperCamelCase="group" , UpperCamelCase="gelu" , UpperCamelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase=False , UpperCamelCase=128 , UpperCamelCase=16 , UpperCamelCase=True , UpperCamelCase=0.05 , UpperCamelCase=10 , UpperCamelCase=2 , UpperCamelCase=0.0 , UpperCamelCase=10 , UpperCamelCase=0 , UpperCamelCase="mean" , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=256 , UpperCamelCase=0 , UpperCamelCase=1 , UpperCamelCase=2 , **UpperCamelCase , ): """simple docstring""" super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase ) lowerCamelCase_ = hidden_size lowerCamelCase_ = feat_extract_norm lowerCamelCase_ = feat_extract_activation lowerCamelCase_ = list(UpperCamelCase ) lowerCamelCase_ = list(UpperCamelCase ) lowerCamelCase_ = list(UpperCamelCase ) lowerCamelCase_ = conv_bias lowerCamelCase_ = 
num_conv_pos_embeddings lowerCamelCase_ = num_conv_pos_embedding_groups lowerCamelCase_ = len(self.conv_dim ) lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = intermediate_size lowerCamelCase_ = squeeze_factor lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = position_buckets lowerCamelCase_ = share_att_key lowerCamelCase_ = relative_attention lowerCamelCase_ = norm_rel_ebd lowerCamelCase_ = list(UpperCamelCase ) lowerCamelCase_ = hidden_act lowerCamelCase_ = num_attention_heads lowerCamelCase_ = hidden_dropout lowerCamelCase_ = attention_dropout lowerCamelCase_ = activation_dropout lowerCamelCase_ = feat_proj_dropout lowerCamelCase_ = final_dropout lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = feature_layer_norm_eps lowerCamelCase_ = initializer_range lowerCamelCase_ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase_ = apply_spec_augment lowerCamelCase_ = mask_time_prob lowerCamelCase_ = mask_time_length lowerCamelCase_ = mask_time_min_masks lowerCamelCase_ = mask_feature_prob lowerCamelCase_ = mask_feature_length lowerCamelCase_ = mask_feature_min_masks # ctc loss lowerCamelCase_ = ctc_loss_reduction lowerCamelCase_ = ctc_zero_infinity # sequence classification lowerCamelCase_ = use_weighted_layer_sum lowerCamelCase_ = classifier_proj_size @property def snake_case ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
675
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = ["image_processor", "tokenizer"] _lowerCamelCase = "OwlViTImageProcessor" _lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCamelCase , ) lowerCamelCase_ = kwargs.pop("feature_extractor" ) lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="max_length" , UpperCamelCase="np" , **UpperCamelCase ): """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." 
) if text is not None: if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )): lowerCamelCase_ = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )] elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ): lowerCamelCase_ = [] # Maximum number of queries across batch lowerCamelCase_ = max([len(UpperCamelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCamelCase ) != max_num_queries: lowerCamelCase_ = t + [" "] * (max_num_queries - len(UpperCamelCase )) lowerCamelCase_ = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) encodings.append(UpperCamelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": lowerCamelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCamelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCamelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) lowerCamelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf lowerCamelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return 
tensor type could not be returned" ) lowerCamelCase_ = BatchEncoding() lowerCamelCase_ = input_ids lowerCamelCase_ = attention_mask if query_images is not None: lowerCamelCase_ = BatchEncoding() lowerCamelCase_ = self.image_processor( UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values lowerCamelCase_ = query_pixel_values if images is not None: lowerCamelCase_ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: lowerCamelCase_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCamelCase_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def snake_case ( self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." 
, UpperCamelCase , ) return self.image_processor_class @property def snake_case ( self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , ) return self.image_processor
675
1
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() a_ : List[Any] = logging.get_logger(__name__) def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] ): lowerCamelCase_ = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ 
("encoder.deit.cls_token", "encoder.embeddings.cls_token"), ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"), ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"), ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"), ("encoder.deit.norm.weight", "encoder.layernorm.weight"), ("encoder.deit.norm.bias", "encoder.layernorm.bias"), ] ) return rename_keys def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple ): for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) lowerCamelCase_ = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) lowerCamelCase_ = in_proj_weight[ : encoder_config.hidden_size, : ] lowerCamelCase_ = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] lowerCamelCase_ = in_proj_weight[ -encoder_config.hidden_size :, : ] def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int ): lowerCamelCase_ = dct.pop(UpperCAmelCase_ ) lowerCamelCase_ = val def __snake_case ( UpperCAmelCase_ : int ): if "handwritten" in checkpoint_url: lowerCamelCase_ = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: lowerCamelCase_ = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg" lowerCamelCase_ = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert("RGB" ) return im @torch.no_grad() def __snake_case ( UpperCAmelCase_ : int , 
UpperCAmelCase_ : Tuple ): lowerCamelCase_ = ViTConfig(image_size=384 , qkv_bias=UpperCAmelCase_ ) lowerCamelCase_ = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: lowerCamelCase_ = 768 elif "large" in checkpoint_url: # use ViT-large encoder lowerCamelCase_ = 1024 lowerCamelCase_ = 4096 lowerCamelCase_ = 24 lowerCamelCase_ = 16 lowerCamelCase_ = 1024 else: raise ValueError("Should either find 'base' or 'large' in checkpoint URL" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: lowerCamelCase_ = False lowerCamelCase_ = "relu" lowerCamelCase_ = 1024 lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False # load HuggingFace model lowerCamelCase_ = ViTModel(UpperCAmelCase_ , add_pooling_layer=UpperCAmelCase_ ) lowerCamelCase_ = TrOCRForCausalLM(UpperCAmelCase_ ) lowerCamelCase_ = VisionEncoderDecoderModel(encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ ) model.eval() # load state_dict of original model, rename some keys lowerCamelCase_ = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location="cpu" , check_hash=UpperCAmelCase_ )["model"] lowerCamelCase_ = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ ) for src, dest in rename_keys: rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): lowerCamelCase_ = state_dict.pop(UpperCAmelCase_ ) if key.startswith("decoder" ) and "output_projection" not in key: lowerCamelCase_ = val else: lowerCamelCase_ = val # load state dict model.load_state_dict(UpperCAmelCase_ ) # Check outputs on an image lowerCamelCase_ = ViTImageProcessor(size=encoder_config.image_size ) 
lowerCamelCase_ = RobertaTokenizer.from_pretrained("roberta-large" ) lowerCamelCase_ = TrOCRProcessor(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = processor(images=prepare_img(UpperCAmelCase_ ) , return_tensors="pt" ).pixel_values # verify logits lowerCamelCase_ = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) lowerCamelCase_ = model(pixel_values=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ ) lowerCamelCase_ = outputs.logits lowerCamelCase_ = torch.Size([1, 1, 50265] ) if "trocr-base-handwritten" in checkpoint_url: lowerCamelCase_ = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif "trocr-large-handwritten" in checkpoint_url: lowerCamelCase_ = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: lowerCamelCase_ = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: lowerCamelCase_ = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , UpperCAmelCase_ , atol=1E-3 ), "First elements of logits not as expected" Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase_ ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( 
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) a_ : Any = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
675
'''simple docstring''' import os import sys import unittest a_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a_ : Tuple = os.path.join(git_repo_path, """src""", """transformers""") a_ : List[Any] = """ {0} = None """ a_ : Optional[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ a_ : str = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(UpperCamelCase ) lowerCamelCase_ = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(UpperCamelCase , "tokenizers" ) lowerCamelCase_ = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(UpperCamelCase , "tensorflow_text" ) lowerCamelCase_ = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers" ) lowerCamelCase_ = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tensorflow_text" ) lowerCamelCase_ = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers_and_vision" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects 
self.assertIn("torch" , UpperCamelCase ) self.assertIn("tensorflow_text" , UpperCamelCase ) self.assertIn("sentencepiece_and_tokenizers" , UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertModel" , objects["tf"] ) self.assertIn("FlaxBertModel" , objects["flax"] ) self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(UpperCamelCase , "\nCONSTANT = None\n" ) lowerCamelCase_ = create_dummy_object("function" , "'torch'" ) self.assertEqual( UpperCamelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" ) lowerCamelCase_ = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n" lowerCamelCase_ = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n" lowerCamelCase_ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , UpperCamelCase )
675
1
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def __snake_case ( UpperCAmelCase_ : List[Any] ): lowerCamelCase_ = int(UpperCAmelCase_ ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = t // 3600, (t // 60) % 60, t % 60 return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}''' def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=300 ): # docstyle-ignore return F''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def __snake_case ( UpperCAmelCase_ : int ): lowerCamelCase_ = "<table border=\"1\" class=\"dataframe\">\n" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: lowerCamelCase_ = F'''{elt:.6f}''' if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else str(UpperCAmelCase_ ) html_code += F''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class snake_case : """simple docstring""" _lowerCamelCase = 5 _lowerCamelCase = 0.2 def __init__( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = 300 , ): """simple docstring""" lowerCamelCase_ = total lowerCamelCase_ = "" if prefix is None else prefix lowerCamelCase_ = leave lowerCamelCase_ = parent lowerCamelCase_ = width lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None def snake_case ( self , UpperCamelCase , UpperCamelCase = False , UpperCamelCase = None ): """simple docstring""" lowerCamelCase_ = value if comment is not None: 
lowerCamelCase_ = comment if self.last_value is None: lowerCamelCase_ = lowerCamelCase_ = time.time() lowerCamelCase_ = lowerCamelCase_ = value lowerCamelCase_ = lowerCamelCase_ = None lowerCamelCase_ = self.warmup lowerCamelCase_ = 1 self.update_bar(UpperCamelCase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 lowerCamelCase_ = time.time() lowerCamelCase_ = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. if value > self.start_value: lowerCamelCase_ = self.elapsed_time / (value - self.start_value) else: lowerCamelCase_ = None if value >= self.total: lowerCamelCase_ = self.total lowerCamelCase_ = None if not self.leave: self.close() elif self.average_time_per_item is not None: lowerCamelCase_ = self.average_time_per_item * (self.total - value) self.update_bar(UpperCamelCase ) lowerCamelCase_ = value lowerCamelCase_ = current_time if self.average_time_per_item is None: lowerCamelCase_ = 1 else: lowerCamelCase_ = max(int(self.update_every / self.average_time_per_item ) , 1 ) def snake_case ( self , UpperCamelCase , UpperCamelCase=None ): """simple docstring""" lowerCamelCase_ = " " * (len(str(self.total ) ) - len(str(UpperCamelCase ) )) + str(UpperCamelCase ) if self.elapsed_time is None: lowerCamelCase_ = f'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: lowerCamelCase_ = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: lowerCamelCase_ = ( f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' f''' {format_time(self.predicted_remaining )}''' ) self.label += f''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]''' self.display() def snake_case ( self ): 
"""simple docstring""" lowerCamelCase_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowerCamelCase_ = disp.display(disp.HTML(self.html_code ) , display_id=UpperCamelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case ( self ): """simple docstring""" if self.parent is None and self.output is not None: self.output.update(disp.HTML("" ) ) class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=None ): """simple docstring""" super().__init__(UpperCamelCase ) lowerCamelCase_ = None if column_names is None else [column_names] lowerCamelCase_ = None def snake_case ( self ): """simple docstring""" lowerCamelCase_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowerCamelCase_ = disp.display(disp.HTML(self.html_code ) , display_id=UpperCamelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def snake_case ( self , UpperCamelCase ): """simple docstring""" if self.inner_table is None: lowerCamelCase_ = [list(values.keys() ), list(values.values() )] else: lowerCamelCase_ = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(UpperCamelCase ) lowerCamelCase_ = columns self.inner_table.append([values[c] for c in columns] ) def snake_case ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=300 ): """simple docstring""" lowerCamelCase_ = NotebookProgressBar(UpperCamelCase , prefix=UpperCamelCase , parent=self , width=UpperCamelCase ) return 
self.child_bar def snake_case ( self ): """simple docstring""" lowerCamelCase_ = None self.display() class snake_case ( lowercase ): """simple docstring""" def __init__( self ): """simple docstring""" lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = False def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step" lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = [self.first_column] + ["Training Loss"] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("Validation Loss" ) lowerCamelCase_ = NotebookTrainingTracker(state.max_steps , UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , ) lowerCamelCase_ = False def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , **UpperCamelCase ): """simple docstring""" if not has_length(UpperCamelCase ): return if self.prediction_bar is None: if self.training_tracker is not None: lowerCamelCase_ = self.training_tracker.add_child(len(UpperCamelCase ) ) else: lowerCamelCase_ = NotebookProgressBar(len(UpperCamelCase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" if self.prediction_bar is not None: self.prediction_bar.close() lowerCamelCase_ = None def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , **UpperCamelCase ): """simple docstring""" # Only for when there 
is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: lowerCamelCase_ = {"Training Loss": logs["loss"]} # First column is necessarily Step sine we're not in epoch eval strategy lowerCamelCase_ = state.global_step self.training_tracker.write_line(UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , **UpperCamelCase ): """simple docstring""" if self.training_tracker is not None: lowerCamelCase_ = {"Training Loss": "No log", "Validation Loss": "No log"} for log in reversed(state.log_history ): if "loss" in log: lowerCamelCase_ = log["loss"] break if self.first_column == "Epoch": lowerCamelCase_ = int(state.epoch ) else: lowerCamelCase_ = state.global_step lowerCamelCase_ = "eval" for k in metrics: if k.endswith("_loss" ): lowerCamelCase_ = re.sub(r"\_loss$" , "" , UpperCamelCase ) lowerCamelCase_ = metrics.pop("total_flos" , UpperCamelCase ) lowerCamelCase_ = metrics.pop("epoch" , UpperCamelCase ) lowerCamelCase_ = metrics.pop(f'''{metric_key_prefix}_runtime''' , UpperCamelCase ) lowerCamelCase_ = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , UpperCamelCase ) lowerCamelCase_ = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , UpperCamelCase ) lowerCamelCase_ = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , UpperCamelCase ) for k, v in metrics.items(): if k == f'''{metric_key_prefix}_loss''': lowerCamelCase_ = v else: lowerCamelCase_ = k.split("_" ) lowerCamelCase_ = " ".join([part.capitalize() for part in splits[1:]] ) lowerCamelCase_ = v self.training_tracker.write_line(UpperCamelCase ) self.training_tracker.remove_child() lowerCamelCase_ = None # Evaluation takes a long time so we should force the next update. 
lowerCamelCase_ = True def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" self.training_tracker.update( state.global_step , comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCamelCase ) lowerCamelCase_ = None
675
'''simple docstring''' from ..utils import DummyObject, requires_backends class snake_case ( metaclass=lowercase ): """simple docstring""" _lowerCamelCase = ["onnx"] def __init__( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(self , ["onnx"] ) @classmethod def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(cls , ["onnx"] ) @classmethod def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(cls , ["onnx"] )
675
1
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets a_ : Optional[Any] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ a_ : List[Any] = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. 
Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} """ a_ : List[Any] = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ): if label_map is not None: for old_id, new_id in label_map.items(): lowerCamelCase_ = new_id # turn into Numpy arrays lowerCamelCase_ = np.array(UpperCAmelCase_ ) lowerCamelCase_ = np.array(UpperCAmelCase_ ) if reduce_labels: lowerCamelCase_ = 255 lowerCamelCase_ = label - 1 lowerCamelCase_ = 255 lowerCamelCase_ = label != ignore_index lowerCamelCase_ = np.not_equal(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = pred_label[mask] lowerCamelCase_ = np.array(UpperCAmelCase_ )[mask] lowerCamelCase_ = pred_label[pred_label == label] lowerCamelCase_ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0] lowerCamelCase_ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0] lowerCamelCase_ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0] lowerCamelCase_ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ): lowerCamelCase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCamelCase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCamelCase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowerCamelCase_ = np.zeros((num_labels,) , dtype=np.floataa ) for result, 
gt_seg_map in zip(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = intersect_and_union( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ): lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = total_intersect_and_union( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # compute metrics lowerCamelCase_ = {} lowerCamelCase_ = total_area_intersect.sum() / total_area_label.sum() lowerCamelCase_ = total_area_intersect / total_area_union lowerCamelCase_ = total_area_intersect / total_area_label lowerCamelCase_ = np.nanmean(UpperCAmelCase_ ) lowerCamelCase_ = np.nanmean(UpperCAmelCase_ ) lowerCamelCase_ = all_acc lowerCamelCase_ = iou lowerCamelCase_ = acc if nan_to_num is not None: lowerCamelCase_ = {metric: np.nan_to_num(UpperCAmelCase_ , nan=UpperCAmelCase_ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): """simple docstring""" def snake_case ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), "references": 
datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ), } ) , reference_urls=[ "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py" ] , ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , ): """simple docstring""" lowerCamelCase_ = mean_iou( results=UpperCamelCase , gt_seg_maps=UpperCamelCase , num_labels=UpperCamelCase , ignore_index=UpperCamelCase , nan_to_num=UpperCamelCase , label_map=UpperCamelCase , reduce_labels=UpperCamelCase , ) return iou_result
675
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=1000 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope 
lowerCamelCase_ = range_bbox def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ = bbox[i, j, 3] lowerCamelCase_ = bbox[i, j, 1] lowerCamelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ = bbox[i, j, 2] lowerCamelCase_ = bbox[i, j, 0] lowerCamelCase_ = t lowerCamelCase_ = tf.convert_to_tensor(UpperCamelCase ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForSequenceClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase 
, token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForQuestionAnswering(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = 10 def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) 
def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFLayoutLMModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def snake_case ( self ): """simple docstring""" pass def __snake_case ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCamelCase_ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCamelCase_ = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) lowerCamelCase_ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the sequence output on [0, :3, :3] lowerCamelCase_ = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCamelCase_ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase , atol=1e-3 ) ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized sequence classification head lowerCamelCase_ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCamelCase_ = outputs.loss lowerCamelCase_ = (2,) self.assertEqual(loss.shape , UpperCamelCase ) # test the shape 
of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = (2, 2) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , UpperCamelCase ) self.assertEqual(outputs.end_logits.shape , UpperCamelCase )
675
1
"""MRA model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Released checkpoint -> hosted config file. (The original annotated this
# `a_ : Any` without importing `Any`, which would raise NameError at import;
# the bogus annotation is dropped, the name is kept.)
a_ = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


# Fix: the base class was the undefined name `lowercase`; `PretrainedConfig`
# is what this module imports for exactly this purpose.
class snake_case(PretrainedConfig):
    """Configuration class for an MRA (Mixed Resolution Attention) model.

    Instantiating with defaults yields a configuration close to the
    uw-madison/mra-base-512-4 checkpoint.
    """

    # Model-type tag. NOTE(review): obfuscated attribute name kept; upstream
    # this is `model_type` — confirm before relying on auto-class dispatch.
    _lowerCamelCase = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config and store every hyperparameter on the instance.

        Fix: the original declared every parameter as `UpperCamelCase`
        (duplicate parameter names are a SyntaxError) and dropped the `self.`
        prefix on the attribute assignments. Parameter names are recovered
        from the right-hand sides of those assignments, which the obfuscation
        preserved; defaults are unchanged.
        """
        # Special-token ids are consumed by the PretrainedConfig base.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # MRA-specific approximation hyperparameters.
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
675
"""Consistency checker for the documentation table of contents.

Verifies (and with --fix_and_overwrite, repairs) that the "Schedulers" and
"Pipelines" sections of docs/source/en/_toctree.yml are deduplicated and
alphabetically sorted by title, with any "Overview" page kept first.
"""
import argparse
from collections import defaultdict


# Path of the toc file this script checks. (Obfuscated module-level name `a_`
# kept for compatibility.)
a_ = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Return *doc_list* cleaned up: duplicate locals merged, entries sorted
    by title (case-insensitive), and the "Overview" entry moved to the front.

    Raises ValueError if duplicated locals disagree on their title, or if
    more than one "Overview" entry is present.
    """
    # Fix: was `defaultdict(doc_list)` — a list is not a valid default_factory
    # and the first `+= 1` would raise TypeError.
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys. Fix: was `"local" not in counts`, which is
    # vacuously true for real toc files and re-added every duplicate,
    # defeating the merge above.
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        # Fix: missing f-prefix — the {doc_list} placeholder never interpolated.
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Check the "Schedulers" section of the toc; rewrite it when *overwrite*.

    Raises ValueError when the section is unsorted and *overwrite* is False.
    """
    # Imported locally so the pure helper above stays importable without PyYAML.
    import yaml

    with open(a_, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    # Fix: the cleaner must run on the section just located (the obfuscated
    # code passed the `overwrite` flag instead).
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(a_, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Check the "Pipelines" section (and each pipeline's sub-sections) of the
    toc; rewrite when *overwrite*, otherwise raise on any mismatch.
    """
    import yaml  # see note in check_scheduler_doc

    with open(a_, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(a_, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    # Fix: the obfuscated script bound the parser and the parsed args to `a_`
    # while referencing `parser`/`args` (NameError), and called the checker
    # functions by names that were never defined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
675
1
"""Lazy-import stub for the TAPEX tokenizer module."""
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Submodule -> public names it exports. Fix: the original bound this dict to a
# throwaway name `a_` while the `_LazyModule(...)` call below referenced
# `_import_structure`, raising NameError at import time.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Fix: install the lazy proxy in place of this module. The original
    # assigned the proxy to `a_`, which never hooked it into the import
    # system, so lazy attribute resolution could not work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
675
"""Precompute and cache per-example token lengths for a seq2seq dataset.

For the "train" and "val" splits, computes each example's non-pad input
length — or max(input, target) length when consider_target=True — and
pickles the resulting list to the dataset's `len_file`.
"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the length files for the train and val splits under *data_dir*.

    Fix: the original declared every parameter as `UpperCAmelCase_` (duplicate
    parameter names are a SyntaxError) and was named `__snake_case`, while
    `fire.Fire(save_len_file)` below referenced this name. Parameter names are
    recovered from the body's preserved references and defaults are unchanged.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        """Return the per-example length list for *ds* (dataset order)."""
        # Batched iteration purely to count non-pad tokens quickly; shuffle
        # stays off so lengths line up with dataset order.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
675
1
"""BPE tokenizer for BlenderbotSmall (machine-recovered / obfuscated dump).

NOTE(review): identifiers were lost in the dump.  Every local is assigned as
``lowerCamelCase_``, every parameter is ``UpperCamelCase`` (several ``def``
headers therefore repeat a parameter name, which is a SyntaxError), and all
module constants are bound to ``a_`` even though the class body reads
``VOCAB_FILES_NAMES`` / ``PRETRAINED_VOCAB_FILES_MAP`` /
``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`` / ``logger``.  The comments below
describe the *intended* behaviour of the upstream file; confirm against the
original transformers source before relying on any of it.
"""
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


# NOTE(review): the four names below were mangled to ``a_`` (each assignment
# overwrites the previous one); the ``List[Any]``/``Optional[Any]`` annotations
# also reference ``Any``, which is not imported -- evaluated at module level
# this raises NameError.
a_ : List[Any] = logging.get_logger(__name__)
a_ : List[Any] = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}
a_ : Optional[Any] = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}
a_ : int = {"""facebook/blenderbot_small-90M""": 512}


def __snake_case ( UpperCAmelCase_ : str ):
    """Return the set of adjacent symbol pairs in a word (upstream ``get_pairs``).

    NOTE(review): the body reads ``word`` / ``pairs`` / ``prev_char`` while the
    only parameter is named ``UpperCAmelCase_`` -- the local names were lost in
    the dump, so as written this raises NameError.
    """
    lowerCamelCase_ = set()
    lowerCamelCase_ = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCamelCase_ = char
    lowerCamelCase_ = set(UpperCAmelCase_ )
    return pairs


class snake_case ( lowercase ):
    """BlenderbotSmall BPE tokenizer.

    NOTE(review): the class and all its methods were renamed to ``snake_case``
    (so only the last binding of each name survives at class-creation time),
    and the base ``lowercase`` is undefined here -- upstream this derives from
    ``PreTrainedTokenizer``.  TODO confirm against upstream.
    """

    _lowerCamelCase = VOCAB_FILES_NAMES
    _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase = ["input_ids", "attention_mask"]

    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase="__start__" , UpperCamelCase="__end__" , UpperCamelCase="__unk__" , UpperCamelCase="__null__" , **UpperCamelCase , ):
        """Load vocab/merges files and build the encoder/decoder maps.

        NOTE(review): the repeated ``UpperCamelCase`` parameters are a
        SyntaxError; upstream the parameters are (vocab_file, merges_file,
        bos_token, eos_token, unk_token, pad_token, **kwargs).
        """
        super().__init__(unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , pad_token=UpperCamelCase , **UpperCamelCase )
        with open(UpperCamelCase , encoding="utf-8" ) as vocab_handle:
            lowerCamelCase_ = json.load(UpperCamelCase )
        # Reverse map (id -> token) used for decoding.
        lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
        with open(UpperCamelCase , encoding="utf-8" ) as merges_handle:
            # First line of a merges file is the "#version" header; the last
            # entry after split("\n") is an empty trailing line -- drop both.
            lowerCamelCase_ = merges_handle.read().split("\n" )[1:-1]
        lowerCamelCase_ = [tuple(merge.split() ) for merge in merges]
        # Merge priority: earlier merges get lower ranks and are applied first.
        lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
        lowerCamelCase_ = {}  # BPE cache: raw token -> encoded string

    @property
    def snake_case ( self ):
        """Vocabulary size (number of entries in the base encoder)."""
        return len(self.encoder )

    def snake_case ( self ):
        """Return the full vocabulary (base + added tokens) as a dict
        (upstream ``get_vocab``)."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def snake_case ( self , UpperCamelCase ):
        """Apply byte-pair encoding to one whitespace-delimited token and return
        a string of "@@ "-joined subwords (upstream ``bpe``)."""
        if token in self.cache:
            return self.cache[token]
        # Pre-tokenization: isolate punctuation and apostrophes, collapse runs
        # of whitespace, and encode newlines as the __newln__ symbol.
        lowerCamelCase_ = re.sub("([.,!?()])" , r" \1" , UpperCamelCase )
        lowerCamelCase_ = re.sub("(')" , r" \1 " , UpperCamelCase )
        lowerCamelCase_ = re.sub(r"\s{2,}" , " " , UpperCamelCase )
        if "\n" in token:
            lowerCamelCase_ = token.replace("\n" , " __newln__" )
        lowerCamelCase_ = token.split(" " )
        lowerCamelCase_ = []
        for token in tokens:
            if not len(UpperCamelCase ):
                continue
            lowerCamelCase_ = token.lower()
            lowerCamelCase_ = tuple(UpperCamelCase )
            # Mark end-of-word on the last symbol so merges can distinguish
            # word-final characters.
            lowerCamelCase_ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
            lowerCamelCase_ = get_pairs(UpperCamelCase )
            if not pairs:
                words.append(UpperCamelCase )
                continue
            while True:
                # Merge the lowest-ranked (= highest priority) pair first;
                # unknown pairs rank as +inf and terminate the loop.
                lowerCamelCase_ = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("inf" ) ) )
                if bigram not in self.bpe_ranks:
                    break
                lowerCamelCase_, lowerCamelCase_ = bigram
                lowerCamelCase_ = []
                lowerCamelCase_ = 0
                while i < len(UpperCamelCase ):
                    try:
                        # Find the next occurrence of the pair's first symbol.
                        lowerCamelCase_ = word.index(UpperCamelCase , UpperCamelCase )
                        new_word.extend(word[i:j] )
                        lowerCamelCase_ = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                lowerCamelCase_ = tuple(UpperCamelCase )
                lowerCamelCase_ = new_word
                if len(UpperCamelCase ) == 1:
                    break
                else:
                    lowerCamelCase_ = get_pairs(UpperCamelCase )
            lowerCamelCase_ = "@@ ".join(UpperCamelCase )
            # Strip the trailing "</w>" marker (4 characters) and cache.
            lowerCamelCase_ = word[:-4]
            lowerCamelCase_ = word
            words.append(UpperCamelCase )
        return " ".join(UpperCamelCase )

    def snake_case ( self , UpperCamelCase ):
        """Split text on whitespace and BPE-encode each piece
        (upstream ``_tokenize``)."""
        lowerCamelCase_ = []
        lowerCamelCase_ = re.findall(r"\S+\n?" , UpperCamelCase )
        for token in words:
            split_tokens.extend(list(self.bpe(UpperCamelCase ).split(" " ) ) )
        return split_tokens

    def snake_case ( self , UpperCamelCase ):
        """Token -> id, falling back to the unk token id
        (upstream ``_convert_token_to_id``)."""
        lowerCamelCase_ = token.lower()
        return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) )

    def snake_case ( self , UpperCamelCase ):
        """Id -> token (upstream ``_convert_id_to_token``)."""
        return self.decoder.get(UpperCamelCase , self.unk_token )

    def snake_case ( self , UpperCamelCase ):
        """Join subword tokens and undo the "@@ " continuation markers."""
        lowerCamelCase_ = " ".join(UpperCamelCase ).replace("@@ " , "" ).strip()
        return out_string

    def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
        """Write vocab.json and merges.txt into a directory
        (upstream ``save_vocabulary``).

        NOTE(review): ``key=lambda UpperCamelCase : kv[1]`` reads ``kv`` which
        is not the lambda's parameter -- upstream this is ``lambda kv: kv[1]``
        (sort merges by rank).  As written this raises NameError.
        """
        if not os.path.isdir(UpperCamelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ = os.path.join(
            UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase_ = os.path.join(
            UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(UpperCamelCase , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + "\n" )
        lowerCamelCase_ = 0
        with open(UpperCamelCase , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges must be written in rank order; warn when ranks are not
            # consecutive (a sign of a corrupted tokenizer).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    lowerCamelCase_ = token_index
                writer.write(" ".join(UpperCamelCase ) + "\n" )
                index += 1
        return vocab_file, merge_file
675
"""Simple keyword (monoalphabetic substitution) cipher.

Fixes over the previous revision: all five functions were defined under the
same mangled name ``__snake_case`` while every call site used the real names
(``remove_duplicates``, ``create_cipher_map``, ``encipher``, ``decipher``,
``main``), so every call raised NameError; ``encipher``/``decipher`` also
declared the same parameter name twice (a SyntaxError) and read locals that
were never bound.  Definitions now match their callers.
"""


def remove_duplicates(key: str) -> str:
    """Return *key* keeping spaces and only the first occurrence of each letter.

    >>> remove_duplicates("Hello World!!")
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        # Keep spaces verbatim; keep a letter only if it was not seen before.
        # (Precedence: ch == " "  OR  (ch not in key_no_dups AND ch.isalpha()))
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build the plaintext->ciphertext letter map (domain A-Z) for *key*."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher upper-cased *message*; unmapped characters pass through.

    >>> encipher("Hello World!!", create_cipher_map("Goodbye!!"))
    'CYJJM VMQJB!!'
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Invert *cipher_map* and decode upper-cased *message*.

    >>> decipher("CYJJM VMQJB!!", create_cipher_map("Goodbye!!"))
    'HELLO WORLD!!'
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive entry point: prompt for message, keyword and direction."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
675
1
"""Lazy import structure for the CTRL model family (obfuscated dump).

NOTE(review): identifiers were mangled.  The import-structure dict/lists and
the final ``_LazyModule`` instance are all bound to ``a_`` (each assignment
overwrites the previous one), whereas upstream they build one
``_import_structure`` dict and assign ``sys.modules[__name__]``.  As written
the last line raises NameError on ``_import_structure``, and the typing names
used in the ``a_ : ...`` annotations (``Optional``, ``List``, ...) are never
imported.  Confirm against the upstream transformers __init__ before relying
on this.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base structure: config + slow tokenizer are always importable.
a_ : Optional[Any] = {
    """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
    """tokenization_ctrl""": ["""CTRLTokenizer"""],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : str = [
        """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CTRLForSequenceClassification""",
        """CTRLLMHeadModel""",
        """CTRLModel""",
        """CTRLPreTrainedModel""",
    ]

# TensorFlow models are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCTRLForSequenceClassification""",
        """TFCTRLLMHeadModel""",
        """TFCTRLModel""",
        """TFCTRLPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): upstream assigns sys.modules[__name__] = _LazyModule(...).
    a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
"""Tests for the OpenAI GPT tokenizer (obfuscated dump).

NOTE(review): identifiers were mangled.  Both test classes are named
``snake_case``, their base ``lowercase`` (upstream ``TokenizerTesterMixin``)
is undefined here, every method is named ``snake_case`` (so later definitions
shadow earlier ones at class-creation time), class attributes are all
``_lowerCamelCase`` and most call arguments were replaced by
``UpperCamelCase``.  The comments describe the intended upstream behaviour;
confirm against transformers' original test file before relying on them.
"""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """Slow/fast OpenAI GPT tokenizer suite (upstream ``OpenAIGPTTokenizationTest``)."""

    _lowerCamelCase = OpenAIGPTTokenizer      # upstream: tokenizer_class -- TODO confirm
    _lowerCamelCase = OpenAIGPTTokenizerFast  # upstream: rust_tokenizer_class -- TODO confirm
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """setUp: write a tiny BPE vocab + merges file into the test tmp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase_ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
        lowerCamelCase_ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(UpperCamelCase ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(UpperCamelCase ) )

    def snake_case ( self , UpperCamelCase ):
        """Return matching (input, output) texts for the common round-trip tests."""
        return "lower newer", "lower newer"

    def snake_case ( self ):
        """Tokenize "lower" with the slow tokenizer and check tokens and ids."""
        lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        lowerCamelCase_ = "lower"
        lowerCamelCase_ = ["low", "er</w>"]
        lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )
        lowerCamelCase_ = tokens + ["<unk>"]
        lowerCamelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self , UpperCamelCase=15 ):
        """Padding to max_length must raise for tokenizers without a pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
                # Simple input
                lowerCamelCase_ = "This is a simple input"
                lowerCamelCase_ = ["This is a simple input 1", "This is a simple input 2"]
                lowerCamelCase_ = ("This is a simple input", "This is a pair")
                lowerCamelCase_ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
                # Simple input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
                # Simple input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )
                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )
                # Pair input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

    def snake_case ( self ):
        """Intentionally empty (upstream disables this common test for this model)."""
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( lowercase ):
    """ftfy/spacy variant: reruns the suite with ftfy-based normalization (upstream)."""

    pass
675
1
"""Convert an Excel-style column title to its 1-based column number."""


def __snake_case ( column_title: str ) -> int:
    """Return the column number for *column_title* (e.g. "A" -> 1, "AB" -> 28).

    Fixes: the parameter was previously named ``UpperCAmelCase_`` while the
    body read ``column_title``, so every call raised NameError.

    Raises AssertionError for empty or non-uppercase input (``"".isupper()``
    is False).  NOTE: ``assert`` is stripped under ``python -O``; kept here to
    preserve the original exception contract for callers.
    """
    assert column_title.isupper()
    answer = 0
    # Horner's scheme in base 26: 'A' (ord 65) is digit 1, ... 'Z' is digit 26.
    # Equivalent to summing (ord(c) - 64) * 26**position from the right.
    for ch in column_title:
        answer = answer * 26 + (ord(ch ) - 64)
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
675
"""Fast (Rust-backed) tokenizer for RoFormer (obfuscated dump).

NOTE(review): identifiers were mangled.  Module constants are all bound to
``a_`` while the class reads ``VOCAB_FILES_NAMES`` / ``PRETRAINED_*``; several
``def`` headers repeat the ``UpperCamelCase`` parameter name (a SyntaxError);
locals were collapsed to ``lowerCamelCase_`` while later statements read the
upstream names (``pre_tok_state``, ``state``, ``token_ids_a``, ...); and the
typing names in the ``a_ : ...`` annotations are partly unimported.  Comments
describe the intended upstream behaviour; confirm before relying on them.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


a_ : Optional[int] = logging.get_logger(__name__)
a_ : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : int = {
    """vocab_file""": {
        """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
        """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
        """junnyu/roformer_chinese_char_small""": (
            """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
        ),
        """junnyu/roformer_chinese_char_base""": (
            """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
        ),
        """junnyu/roformer_small_discriminator""": (
            """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
        ),
        """junnyu/roformer_small_generator""": (
            """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
        ),
    }
}
a_ : Any = {
    """junnyu/roformer_chinese_small""": 1536,
    """junnyu/roformer_chinese_base""": 1536,
    """junnyu/roformer_chinese_char_small""": 512,
    """junnyu/roformer_chinese_char_base""": 512,
    """junnyu/roformer_small_discriminator""": 128,
    """junnyu/roformer_small_generator""": 128,
}
a_ : List[Any] = {
    """junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
    """junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
    """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
    """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
    """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
    """junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}


class snake_case ( lowercase ):
    """RoFormer fast tokenizer; swaps BERT's pre-tokenizer for a Jieba-based
    one for Chinese word segmentation.

    NOTE(review): base ``lowercase`` is undefined here -- upstream this
    derives from ``PreTrainedTokenizerFast``.  TODO confirm.
    """

    _lowerCamelCase = VOCAB_FILES_NAMES
    _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
    _lowerCamelCase = RoFormerTokenizer

    def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase="[UNK]" , UpperCamelCase="[SEP]" , UpperCamelCase="[PAD]" , UpperCamelCase="[CLS]" , UpperCamelCase="[MASK]" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ):
        """NOTE(review): duplicated parameter names are a SyntaxError; upstream
        the parameters are (vocab_file, tokenizer_file, do_lower_case,
        unk/sep/pad/cls/mask tokens, tokenize_chinese_chars, strip_accents)."""
        super().__init__(
            UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
        # Rebuild the backend normalizer when its lowercase/strip_accents
        # state disagrees with the requested options.
        lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("lowercase" , UpperCamelCase ) != do_lower_case
            or pre_tok_state.get("strip_accents" , UpperCamelCase ) != strip_accents
        ):
            lowerCamelCase_ = getattr(UpperCamelCase , pre_tok_state.pop("type" ) )
            lowerCamelCase_ = do_lower_case
            lowerCamelCase_ = strip_accents
            lowerCamelCase_ = pre_tok_class(**UpperCamelCase )
        lowerCamelCase_ = do_lower_case

    def __getstate__( self ):
        """Pickle support: the custom Jieba pre-tokenizer is not picklable, so
        swap in a plain BertPreTokenizer before serialization."""
        lowerCamelCase_ = self.__dict__.copy()
        lowerCamelCase_ = BertPreTokenizer()
        return state

    def __setstate__( self , UpperCamelCase ):
        """Restore pickled state and re-attach the Jieba pre-tokenizer."""
        lowerCamelCase_ = d
        lowerCamelCase_ = self.__dict__["_tokenizer"].get_vocab()
        lowerCamelCase_ = PreTokenizer.custom(JiebaPreTokenizer(UpperCamelCase ) )

    def snake_case ( self , UpperCamelCase , UpperCamelCase=None ):
        """Build [CLS] A [SEP] (B [SEP]) input ids
        (upstream ``build_inputs_with_special_tokens``).

        NOTE(review): both branches read ``token_ids_a`` -- upstream uses
        token_ids_0 for the first segment and token_ids_1 for the second;
        the mangling collapsed the two names.
        """
        lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
        """Token-type ids: 0 over [CLS] A [SEP], 1 over B [SEP]
        (upstream ``create_token_type_ids_from_sequences``)."""
        lowerCamelCase_ = [self.sep_token_id]
        lowerCamelCase_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
        """Save the backend vocabulary files (upstream ``save_vocabulary``)."""
        lowerCamelCase_ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
        return tuple(UpperCamelCase )

    def snake_case ( self , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , **UpperCamelCase , ):
        """Temporarily restore BertPreTokenizer while saving -- custom
        pre-tokenizers cannot be serialized into tokenizer.json."""
        lowerCamelCase_ = BertPreTokenizer()
        return super().save_pretrained(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
675
1
"""ElGamal key generator: writes <name>_pubkey.txt and <name>_privkey.txt.

Fixes over the previous revision: the four functions were all defined under
the mangled name ``__snake_case`` while every call site used the real names
(``primitive_root``, ``generate_key``, ``make_key_files``, ``main``), so any
call raised NameError; ``make_key_files`` declared the same parameter name
twice (a SyntaxError); and the module constant carried a ``List[str]``
annotation although ``typing`` is never imported (NameError at import time).
"""
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest candidate considered for roots / private exponents below.
a_ = 3


def primitive_root(p_val: int) -> int:
    """Return a random number that is (probably) a primitive root modulo *p_val*.

    Candidates g with g^2 == 1 (mod p) or g^p == 1 (mod p) are rejected and a
    new candidate is drawn.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of roughly *key_size* bits.

    Returns ((key_size, e_1, e_2, p), (key_size, d)) where p is a large prime,
    e_1 a primitive root mod p, d the private exponent, and
    e_2 = inverse(e_1^d mod p, p).
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write the key pair to <name>_pubkey.txt / <name>_privkey.txt.

    Aborts via sys.exit() instead of overwriting existing key files.
    """
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    """Script entry point: generate a 2048-bit ElGamal key pair."""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
675
"""Tests for UperNet semantic segmentation (obfuscated dump).

NOTE(review): identifiers were mangled.  All three classes are named
``snake_case`` (only the last binding survives at module level), every method
is named ``snake_case`` (later defs shadow earlier ones at class-creation
time), class attributes are all ``_lowerCamelCase``, and most call arguments
were replaced by ``UpperCamelCase`` (the ``__init__`` below also repeats a
parameter name, which is a SyntaxError).  ``lowercase`` (the mixin bases) and
``UperNetModelTester`` are undefined as written.  The comments describe the
intended upstream behaviour; confirm against transformers' original test file.
"""
import inspect
import unittest

from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class snake_case :
    """Model tester: builds tiny configs/inputs for fast UperNet unit tests
    (upstream ``UperNetModelTester``)."""

    def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=[10, 20, 30, 40] , UpperCamelCase=[2, 2, 3, 2] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=3 , UpperCamelCase=None , ):
        # NOTE(review): duplicated parameter names; upstream signature is
        # (parent, batch_size=13, image_size=32, num_channels=3, num_stages=4,
        #  hidden_sizes=..., depths=..., is_training=True, use_labels=True,
        #  intermediate_size=37, hidden_act="gelu",
        #  type_sequence_label_size=10, initializer_range=0.02,
        #  out_features=..., num_labels=3, scope=None).
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = image_size
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = num_stages
        lowerCamelCase_ = hidden_sizes
        lowerCamelCase_ = depths
        lowerCamelCase_ = is_training
        lowerCamelCase_ = use_labels
        lowerCamelCase_ = intermediate_size
        lowerCamelCase_ = hidden_act
        lowerCamelCase_ = type_sequence_label_size
        lowerCamelCase_ = initializer_range
        lowerCamelCase_ = out_features
        lowerCamelCase_ = num_labels
        lowerCamelCase_ = scope
        lowerCamelCase_ = num_stages

    def snake_case ( self ):
        """Create random pixel_values (and labels when use_labels) plus a config
        (upstream ``prepare_config_and_inputs``)."""
        lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ = None
        if self.use_labels:
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase_ = self.get_config()
        return config, pixel_values, labels

    def snake_case ( self ):
        """Small ConvNext backbone config (upstream ``get_backbone_config``)."""
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def snake_case ( self ):
        """UperNet config wrapping the ConvNext backbone (upstream ``get_config``)."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Instantiate the model and check the logits shape
        (upstream ``create_and_check_for_semantic_segmentation``)."""
        lowerCamelCase_ = UperNetForSemanticSegmentation(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def snake_case ( self ):
        """Split prepared inputs into (config, inputs_dict) for the common tests
        (upstream ``prepare_config_and_inputs_for_common``)."""
        lowerCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,
        ) = config_and_inputs
        lowerCamelCase_ = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
    """Common model-test suite for UperNet (upstream ``UperNetModelTest``,
    mixing in ModelTesterMixin and PipelineTesterMixin)."""

    _lowerCamelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _lowerCamelCase = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    def snake_case ( self ):
        """setUp: model tester + config tester.

        NOTE(review): ``UperNetModelTester`` is undefined here -- the tester
        class above was renamed to ``snake_case`` by the mangling.
        """
        lowerCamelCase_ = UperNetModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )

    def snake_case ( self ):
        """Run the standard config serialization round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case ( self ):
        """No-op placeholder (upstream ``create_and_test_config_common_properties``)."""
        return

    def snake_case ( self ):
        """forward() must accept ``pixel_values`` as its first argument."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCamelCase )
            lowerCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ = [*signature.parameters.keys()]
            lowerCamelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase )

    def snake_case ( self ):
        """End-to-end logits-shape check for semantic segmentation."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase )

    @unittest.skip(reason="UperNet does not use inputs_embeds" )
    def snake_case ( self ):
        """Skipped common test."""
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings" )
    def snake_case ( self ):
        """Skipped common test."""
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def snake_case ( self ):
        """Skipped common test."""
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def snake_case ( self ):
        """Skipped common test."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def snake_case ( self ):
        """Skipped multi-GPU test."""
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def snake_case ( self ):
        """Skipped common test."""
        pass

    def snake_case ( self ):
        """hidden_states must have num_stages+1 entries with the expected
        spatial size, both via kwarg and via config flag."""

        def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
            lowerCamelCase_ = model_class(UpperCamelCase )
            model.to(UpperCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
            lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ = self.model_tester.num_stages
            self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """With a zero-init config, every trainable parameter must initialize
        to exactly 0 or 1 (upstream ``test_initialization``)."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = _config_zero_init(UpperCamelCase )
        lowerCamelCase_ = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(config=UpperCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    @unittest.skip(reason="UperNet does not have tied weights" )
    def snake_case ( self ):
        """Skipped common test."""
        pass

    @slow
    def snake_case ( self ):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )


def __snake_case ( ):
    """Download and open the ADE20k fixture image (upstream ``prepare_img``).

    NOTE(review): the body reads ``UpperCAmelCase_`` / ``image`` which the
    mangling left unbound; upstream opens the downloaded filepath.
    """
    lowerCamelCase_ = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    lowerCamelCase_ = Image.open(UpperCAmelCase_ ).convert("RGB" )
    return image


@require_torch
@require_vision
@slow
class snake_case ( unittest.TestCase ):
    """Integration tests against real pretrained UperNet checkpoints."""

    def snake_case ( self ):
        """Check logit values for openmmlab/upernet-swin-tiny on the fixture image."""
        lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
        lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(UpperCamelCase )
        lowerCamelCase_ = prepare_img()
        lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )

    def snake_case ( self ):
        """Check logit values for openmmlab/upernet-convnext-tiny on the fixture image."""
        lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
        lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(UpperCamelCase )
        lowerCamelCase_ = prepare_img()
        lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
675
1
"""Lazy import structure for the encoder-decoder framework (obfuscated dump).

NOTE(review): identifiers were mangled.  The import-structure dict/lists and
the final ``_LazyModule`` are all bound to ``a_`` (upstream:
``_import_structure`` and ``sys.modules[__name__]``), so the last line raises
NameError on ``_import_structure`` as written, and the typing names used in
the ``a_ : ...`` annotations are never imported.  Confirm against upstream.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Config is always importable; framework-specific models are optional.
a_ : Optional[int] = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[str] = ["""EncoderDecoderModel"""]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Any = ["""TFEncoderDecoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Any = ["""FlaxEncoderDecoderModel"""]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # NOTE(review): upstream assigns sys.modules[__name__] = _LazyModule(...).
    a_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
"""Initialize a fresh GPT-2-style CodeParrot model and optionally push it to the Hub.

NOTE(review): identifiers were mangled -- every module-level binding is
``a_`` (each assignment overwrites the previous one) while later statements
read the upstream names (``parser``, ``args``, ``tokenizer``,
``config_kwargs``, ``config``, ``model``).  As written the script raises
NameError on ``parser``; the comments describe the intended behaviour.
All knobs come from ``InitializationArguments`` (see arguments.py).
"""
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
a_ : Optional[int] = HfArgumentParser(InitializationArguments)

a_ : str = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
a_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
a_ : str = {
    """vocab_size""": len(tokenizer),
    """scale_attn_by_inverse_layer_idx""": True,
    """reorder_and_upcast_attn""": True,
}

# Load model config (GPT-2 large in this case)
a_ : Optional[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
a_ : Optional[Any] = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
675
1
'''simple docstring'''
# NOTE(review): identifiers in this module have been machine-mangled — every
# assignment target became `lowerCamelCase_`, every function `__snake_case`,
# and parameters `UpperCAmelCase_` (duplicated parameter names are a
# SyntaxError).  Reads still use the original names (`model_name`, `config`,
# `state_dict`, `get_maskformer_config`, ...), so the script cannot run
# as-is; the original bindings must be restored.  Comments below record the
# evident intent of each unit to aid that restoration.
#
# Purpose: convert an original MaskFormer (Swin-tiny backbone) pickle
# checkpoint into the Hugging Face `MaskFormerForInstanceSegmentation` layout.
import argparse
import json
import pickle
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging

logging.set_verbosity_info()
# NOTE(review): `Optional` is not imported — annotation is mangling residue.
a_ : Optional[int] = logging.get_logger(__name__)


def __snake_case ( UpperCAmelCase_ : str ):  # originally: get_maskformer_config(model_name)
    """Build a ``MaskFormerConfig`` with a Swin-tiny backbone whose label count
    and id2label mapping are chosen from the dataset encoded in the model name."""
    lowerCamelCase_ = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
    lowerCamelCase_ = MaskFormerConfig(backbone_config=UpperCAmelCase_ )

    lowerCamelCase_ = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        lowerCamelCase_ = 847
        lowerCamelCase_ = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        lowerCamelCase_ = 150
        lowerCamelCase_ = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        lowerCamelCase_ = 171
        lowerCamelCase_ = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        lowerCamelCase_ = 133
        lowerCamelCase_ = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        lowerCamelCase_ = 19
        lowerCamelCase_ = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        lowerCamelCase_ = 65
        lowerCamelCase_ = "mapillary-vistas-id2label.json"

    # fetch the id2label mapping for the chosen dataset from the hub
    lowerCamelCase_ = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="dataset" ) , "r" ) )
    lowerCamelCase_ = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}

    return config


def __snake_case ( UpperCAmelCase_ : str ):  # originally: create_rename_keys(config)
    """Return (old_key, new_key) pairs mapping original MaskFormer/Swin
    checkpoint tensor names onto the HF model's parameter names."""
    lowerCamelCase_ = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
        # only the first 3 stages have a patch-merging (downsample) layer
        if i < 3:
            rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
            rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
        rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
        rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )

    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
    # original FPN counts layers top-down (3..1); HF counts bottom-up (0..2)
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
        rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
        rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
        # cross-attention out projection
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
        # MLP 1
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
        # MLP 2
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
        # layernorm 3 (final layernorm)
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )

    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
    for i in range(3 ):
        rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
        rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
    # fmt: on

    return rename_keys


def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ):  # originally: rename_key(dct, old, new)
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    lowerCamelCase_ = dct.pop(UpperCAmelCase_ )
    lowerCamelCase_ = val


def __snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):  # originally: read_in_swin_q_k_v(state_dict, backbone_config)
    """Split each Swin block's fused ``attn.qkv`` weight/bias into separate
    query/key/value tensors under the HF naming scheme (in-place on state_dict)."""
    lowerCamelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        lowerCamelCase_ = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            lowerCamelCase_ = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            lowerCamelCase_ = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            lowerCamelCase_ = in_proj_weight[:dim, :]
            lowerCamelCase_ = in_proj_bias[: dim]
            lowerCamelCase_ = in_proj_weight[ dim : dim * 2, : ]
            lowerCamelCase_ = in_proj_bias[ dim : dim * 2 ]
            lowerCamelCase_ = in_proj_weight[ -dim :, : ]
            lowerCamelCase_ = in_proj_bias[-dim :]
            # fmt: on


def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ):  # originally: read_in_decoder_q_k_v(state_dict, config)
    """Split each transformer-decoder layer's fused self/cross attention
    ``in_proj`` weight/bias into separate q/k/v tensors (in-place)."""
    # fmt: off
    lowerCamelCase_ = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        lowerCamelCase_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        lowerCamelCase_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase_ = in_proj_weight[: hidden_size, :]
        lowerCamelCase_ = in_proj_bias[:config.hidden_size]
        lowerCamelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
        lowerCamelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
        lowerCamelCase_ = in_proj_weight[-hidden_size :, :]
        lowerCamelCase_ = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        lowerCamelCase_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        lowerCamelCase_ = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        lowerCamelCase_ = in_proj_weight[: hidden_size, :]
        lowerCamelCase_ = in_proj_bias[:config.hidden_size]
        lowerCamelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
        lowerCamelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
        lowerCamelCase_ = in_proj_weight[-hidden_size :, :]
        lowerCamelCase_ = in_proj_bias[-hidden_size :]
    # fmt: on


def __snake_case ( ):  # originally: prepare_img()
    """Download the standard COCO cats test image used for verification."""
    lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowerCamelCase_ = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
    return im


@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):  # originally: convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub)
    """Load the pickled original checkpoint, rename/split its tensors, load
    them into a fresh HF model, sanity-check one logit slice for the tiny-ADE
    model, then optionally save locally and/or push to the hub."""
    lowerCamelCase_ = get_maskformer_config(UpperCAmelCase_ )

    # load original state_dict
    with open(UpperCAmelCase_ , "rb" ) as f:
        lowerCamelCase_ = pickle.load(UpperCAmelCase_ )
    lowerCamelCase_ = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    lowerCamelCase_ = create_rename_keys(UpperCAmelCase_ )
    for src, dest in rename_keys:
        rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    read_in_swin_q_k_v(UpperCAmelCase_ , config.backbone_config )
    read_in_decoder_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ )

    # update to torch tensors
    for key, value in state_dict.items():
        lowerCamelCase_ = torch.from_numpy(UpperCAmelCase_ )

    # load 🤗 model
    lowerCamelCase_ = MaskFormerForInstanceSegmentation(UpperCAmelCase_ )
    model.eval()

    for name, param in model.named_parameters():
        print(UpperCAmelCase_ , param.shape )

    lowerCamelCase_ ,lowerCamelCase_ = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
    # the final backbone layernorm is the only weight expected to be missing
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(UpperCAmelCase_ ) == 0, F'''Unexpected keys: {unexpected_keys}'''

    # verify results
    lowerCamelCase_ = prepare_img()
    if "vistas" in model_name:
        lowerCamelCase_ = 65
    elif "cityscapes" in model_name:
        lowerCamelCase_ = 65535
    else:
        lowerCamelCase_ = 255
    lowerCamelCase_ = True if "ade" in model_name else False
    lowerCamelCase_ = MaskFormerImageProcessor(ignore_index=UpperCAmelCase_ , reduce_labels=UpperCAmelCase_ )

    lowerCamelCase_ = image_processor(UpperCAmelCase_ , return_tensors="pt" )

    lowerCamelCase_ = model(**UpperCAmelCase_ )

    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )

    if model_name == "maskformer-swin-tiny-ade":
        lowerCamelCase_ = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
        model.save_pretrained(UpperCAmelCase_ )
        image_processor.save_pretrained(UpperCAmelCase_ )

    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(F'''nielsr/{model_name}''' )
        image_processor.push_to_hub(F'''nielsr/{model_name}''' )


if __name__ == "__main__":
    a_ : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        # NOTE(review): the trailing comma makes `help` a 1-tuple; argparse will
        # print its repr in --help output.  Drop the parentheses/comma.
        help=("""Name of the MaskFormer model you'd like to convert""",),
    )
    parser.add_argument(
        """--checkpoint_path""",
        default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
        type=str,
        help="""Path to the original state dict (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    a_ : List[str] = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
675
'''simple docstring'''
# Lint as: python3
# NOTE(review): identifiers here have been machine-mangled (assignments to
# `lowerCamelCase_`, all methods named `snake_case`, duplicated `UpperCamelCase`
# parameters — a SyntaxError).  Reads still use the original attribute names
# (`self.dataset_name`, `self.dummy_file`, `self.download_callbacks`, ...), so
# the original bindings must be restored before this can run.  Comments below
# record the evident intent of each method.
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version

a_ : List[Any] = get_logger(__name__)


class snake_case :
    """Mock download manager that resolves dataset URLs to paths inside a local
    or GitHub-hosted ``dummy_data.zip`` fixture, for testing dataset scripts
    without downloading real data."""

    # class-level defaults (mangled: originally distinct names, e.g. the
    # dummy-data folder name, the scripts repo name, and a boolean flag)
    _lowerCamelCase = "dummy_data"
    _lowerCamelCase = "datasets"
    _lowerCamelCase = False

    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ):
        """Store dataset name, config, version and behaviour flags.

        NOTE(review): parameters were mangled to the same name; the reads below
        show the originals were (dataset_name, config, version, cache_dir,
        use_local_dummy_data, load_existing_dummy_data, download_callbacks).
        """
        lowerCamelCase_ = 0
        lowerCamelCase_ = dataset_name
        lowerCamelCase_ = cache_dir
        lowerCamelCase_ = use_local_dummy_data
        lowerCamelCase_ = config
        # download_callbacks take a single url as input
        lowerCamelCase_ = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowerCamelCase_ = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        lowerCamelCase_ = str(UpperCamelCase )
        # to be downloaded
        lowerCamelCase_ = None
        lowerCamelCase_ = None

    @property
    def snake_case ( self ):
        """Lazily download/extract the dummy data and cache the local path."""
        if self._dummy_file is None:
            lowerCamelCase_ = self.download_dummy_data()
        return self._dummy_file

    @property
    def snake_case ( self ):
        """Relative folder of the dummy data inside the dataset script dir."""
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("dummy" , self.version_name )

    @property
    def snake_case ( self ):
        """Relative path of the dummy_data.zip archive."""
        return os.path.join(self.dummy_data_folder , "dummy_data.zip" )

    def snake_case ( self ):
        """Fetch (locally or from GitHub) and extract the dummy zip; return the
        extraction root joined with the dummy file name."""
        lowerCamelCase_ = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowerCamelCase_ = cached_path(
            UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase )
        return os.path.join(UpperCamelCase , self.dummy_file_name )

    @property
    def snake_case ( self ):
        """Path of the dummy zip inside the local datasets scripts checkout."""
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def snake_case ( self ):
        """Lazily computed GitHub raw URL for the dummy zip."""
        if self._bucket_url is None:
            lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
        return self._bucket_url

    @property
    def snake_case ( self ):
        """Directory that contains the dummy data (the file's parent if it is a file)."""
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )

    def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
        """Main entry point (mock of ``download_and_extract``): map a URL, a
        list/tuple of URLs, or a dict of URLs to paths inside the dummy data."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowerCamelCase_ = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowerCamelCase_ = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(UpperCamelCase , UpperCamelCase ):
            return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase )
        elif isinstance(UpperCamelCase , (list, tuple) ):
            return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase )
        else:
            return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase )

    def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
        """Mock of ``download`` — delegates to download_and_extract."""
        return self.download_and_extract(UpperCamelCase )

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Mock of ``download_custom`` — delegates to download_and_extract."""
        return self.download_and_extract(UpperCamelCase )

    def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ):
        """Mock of ``extract`` — the dummy data is already extracted."""
        return path

    def snake_case ( self ):
        """Mock of ``get_recorded_sizes_checksums`` — nothing recorded."""
        return {}

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Map a dict of URLs to dummy-file paths keyed like the input dict."""
        lowerCamelCase_ = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(UpperCamelCase , UpperCamelCase ):
                    for single_url in single_urls:
                        download_callback(UpperCamelCase )
                else:
                    lowerCamelCase_ = single_urls
                    download_callback(UpperCamelCase )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(UpperCamelCase , UpperCamelCase ):
                lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls]
            else:
                lowerCamelCase_ = single_urls
                lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) )
            lowerCamelCase_ = value

        # make sure that values are unique
        if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Map a list of URLs to dummy-file paths (sharded/pubmed URLs collapse
        to a single fixture file repeated)."""
        lowerCamelCase_ = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url )
        lowerCamelCase_ = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(UpperCamelCase )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
            dummy_data_list.append(UpperCamelCase )
        return dummy_data_list

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Map a single URL to its dummy-file path."""
        for download_callback in self.download_callbacks:
            download_callback(UpperCamelCase )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
        if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def snake_case ( self ):
        """Mock of ``delete_extracted_files`` — no-op for dummy data."""
        pass

    def snake_case ( self ):
        """Mock of ``manage_extracted_files`` — no-op for dummy data."""
        pass

    def snake_case ( self , UpperCamelCase ):
        """Mock of ``iter_archive``: yield (posix_relpath, fileobj) pairs for
        every member under *path*, reading from the local zip when available."""
        def _iter_archive_members(UpperCamelCase ):
            # this preserves the order of the members inside the ZIP archive
            lowerCamelCase_ = Path(self.dummy_file ).parent
            lowerCamelCase_ = path.relative_to(UpperCamelCase )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowerCamelCase_ = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(UpperCamelCase )

        lowerCamelCase_ = Path(UpperCamelCase )
        lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__") ):
                yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" )

    def snake_case ( self , UpperCamelCase ):
        """Mock of ``iter_files``: yield every non-hidden file under the given
        path(s), walking directories in sorted order."""
        if not isinstance(UpperCamelCase , UpperCamelCase ):
            lowerCamelCase_ = [paths]
        for path in paths:
            if os.path.isfile(UpperCamelCase ):
                if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(UpperCamelCase ):
                    if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(UpperCamelCase ):
                        if filename.startswith((".", "__") ):
                            continue
                        yield os.path.join(UpperCamelCase , UpperCamelCase )
675
1
'''simple docstring'''
# NOTE(review): identifiers mangled as elsewhere in this file (assignments to
# `lowerCamelCase_`, methods named `snake_case`, duplicated `UpperCamelCase`
# parameters — a SyntaxError, base classes collapsed to `lowercase`).  The
# imports and the reads (`self.encoder`, `self.quant_conv`, ...) show this is
# a KL-regularized variational autoencoder (AutoencoderKL-style) with
# optional sliced and tiled encode/decode.  Names must be restored before use.
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class snake_case ( lowercase ):
    """Output of the encoder: holds the latent distribution.

    NOTE(review): `_lowerCamelCase = 42` is mangling residue — presumably the
    original field annotation (e.g. ``latent_dist: DiagonalGaussianDistribution``);
    confirm against upstream before restoring.
    """
    _lowerCamelCase = 42


class snake_case ( lowercase , lowercase ):
    """VAE with KL loss: Encoder -> quant conv -> latent distribution, and
    post-quant conv -> Decoder, with optional slicing/tiling for large inputs."""

    _lowerCamelCase = True  # presumably `supports_gradient_checkpointing`; confirm

    @register_to_config
    def __init__( self , UpperCamelCase = 3 , UpperCamelCase = 3 , UpperCamelCase = ("DownEncoderBlock2D",) , UpperCamelCase = ("UpDecoderBlock2D",) , UpperCamelCase = (64,) , UpperCamelCase = 1 , UpperCamelCase = "silu" , UpperCamelCase = 4 , UpperCamelCase = 32 , UpperCamelCase = 32 , UpperCamelCase = 0.18_215 , ):
        """Build encoder/decoder plus the 1x1 quant/post-quant convolutions and
        initialize the slicing/tiling bookkeeping."""
        super().__init__()

        # pass init params to Encoder
        lowerCamelCase_ = Encoder(
            in_channels=UpperCamelCase , out_channels=UpperCamelCase , down_block_types=UpperCamelCase , block_out_channels=UpperCamelCase , layers_per_block=UpperCamelCase , act_fn=UpperCamelCase , norm_num_groups=UpperCamelCase , double_z=UpperCamelCase , )

        # pass init params to Decoder
        lowerCamelCase_ = Decoder(
            in_channels=UpperCamelCase , out_channels=UpperCamelCase , up_block_types=UpperCamelCase , block_out_channels=UpperCamelCase , layers_per_block=UpperCamelCase , norm_num_groups=UpperCamelCase , act_fn=UpperCamelCase , )

        # NOTE(review): `nn.Convad` does not exist in torch.nn — almost
        # certainly a mangled `nn.Conv2d` (1x1 convs on the latent channels);
        # confirm against upstream before restoring.
        lowerCamelCase_ = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
        lowerCamelCase_ = nn.Convad(UpperCamelCase , UpperCamelCase , 1 )
        lowerCamelCase_ = False
        lowerCamelCase_ = False

        # only relevant if vae tiling is enabled
        lowerCamelCase_ = self.config.sample_size
        lowerCamelCase_ = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        # latent tile size derived from the number of downsampling stages
        lowerCamelCase_ = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        lowerCamelCase_ = 0.25

    def snake_case ( self , UpperCamelCase , UpperCamelCase=False ):
        """Toggle gradient checkpointing on Encoder/Decoder submodules."""
        if isinstance(UpperCamelCase , (Encoder, Decoder) ):
            lowerCamelCase_ = value

    def snake_case ( self , UpperCamelCase = True ):
        """Enable (or set) tiled encode/decode for large inputs."""
        lowerCamelCase_ = use_tiling

    def snake_case ( self ):
        """Disable tiled encode/decode."""
        self.enable_tiling(UpperCamelCase )

    def snake_case ( self ):
        """Enable sliced (per-sample) encode/decode to save memory."""
        lowerCamelCase_ = True

    def snake_case ( self ):
        """Disable sliced encode/decode."""
        lowerCamelCase_ = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def snake_case ( self ):
        """Return all attention processors of the model, indexed by their
        dotted module path."""
        lowerCamelCase_ = {}

        def fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
            if hasattr(UpperCamelCase , "set_processor" ):
                lowerCamelCase_ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''' , UpperCamelCase , UpperCamelCase )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
        return processors

    def snake_case ( self , UpperCamelCase ):
        """Set the attention processor(s); accepts a single processor or a dict
        keyed by module path (must match the number of attention layers)."""
        lowerCamelCase_ = len(self.attn_processors.keys() )

        if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the'''
                f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )

        def fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
            if hasattr(UpperCamelCase , "set_processor" ):
                if not isinstance(UpperCamelCase , UpperCamelCase ):
                    module.set_processor(UpperCamelCase )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''' , UpperCamelCase , UpperCamelCase )

        for name, module in self.named_children():
            fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """Reset every attention processor to the default ``AttnProcessor``."""
        self.set_attn_processor(AttnProcessor() )

    @apply_forward_hook
    def snake_case ( self , UpperCamelCase , UpperCamelCase = True ):
        """Encode *x* to a latent distribution; routes to tiled/sliced paths
        when the corresponding mode is enabled and applicable."""
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(UpperCamelCase , return_dict=UpperCamelCase )

        if self.use_slicing and x.shape[0] > 1:
            # encode one sample at a time to cap peak memory
            lowerCamelCase_ = [self.encoder(UpperCamelCase ) for x_slice in x.split(1 )]
            lowerCamelCase_ = torch.cat(UpperCamelCase )
        else:
            lowerCamelCase_ = self.encoder(UpperCamelCase )

        lowerCamelCase_ = self.quant_conv(UpperCamelCase )
        lowerCamelCase_ = DiagonalGaussianDistribution(UpperCamelCase )

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=UpperCamelCase )

    def snake_case ( self , UpperCamelCase , UpperCamelCase = True ):
        """Decode latents *z* to a sample (internal, non-sliced path)."""
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(UpperCamelCase , return_dict=UpperCamelCase )

        lowerCamelCase_ = self.post_quant_conv(UpperCamelCase )
        lowerCamelCase_ = self.decoder(UpperCamelCase )

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=UpperCamelCase )

    @apply_forward_hook
    def snake_case ( self , UpperCamelCase , UpperCamelCase = True ):
        """Public decode: optionally slices the batch before delegating to the
        internal decode path."""
        if self.use_slicing and z.shape[0] > 1:
            lowerCamelCase_ = [self._decode(UpperCamelCase ).sample for z_slice in z.split(1 )]
            lowerCamelCase_ = torch.cat(UpperCamelCase )
        else:
            lowerCamelCase_ = self._decode(UpperCamelCase ).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=UpperCamelCase )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Vertically blend the bottom edge of tile *a* into the top edge of
        tile *b* over *blend_extent* rows (linear cross-fade); returns b."""
        lowerCamelCase_ = min(a.shape[2] , b.shape[2] , UpperCamelCase )
        for y in range(UpperCamelCase ):
            lowerCamelCase_ = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Horizontally blend the right edge of tile *a* into the left edge of
        tile *b* over *blend_extent* columns (linear cross-fade); returns b."""
        lowerCamelCase_ = min(a.shape[3] , b.shape[3] , UpperCamelCase )
        for x in range(UpperCamelCase ):
            lowerCamelCase_ = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def snake_case ( self , UpperCamelCase , UpperCamelCase = True ):
        """Tiled encode: encode overlapping spatial tiles, cross-fade the seams,
        and concatenate into one latent distribution."""
        lowerCamelCase_ = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        lowerCamelCase_ = int(self.tile_latent_min_size * self.tile_overlap_factor )
        lowerCamelCase_ = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        lowerCamelCase_ = []
        for i in range(0 , x.shape[2] , UpperCamelCase ):
            lowerCamelCase_ = []
            for j in range(0 , x.shape[3] , UpperCamelCase ):
                lowerCamelCase_ = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                lowerCamelCase_ = self.encoder(UpperCamelCase )
                lowerCamelCase_ = self.quant_conv(UpperCamelCase )
                row.append(UpperCamelCase )
            rows.append(UpperCamelCase )
        lowerCamelCase_ = []
        for i, row in enumerate(UpperCamelCase ):
            lowerCamelCase_ = []
            for j, tile in enumerate(UpperCamelCase ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    lowerCamelCase_ = self.blend_v(rows[i - 1][j] , UpperCamelCase , UpperCamelCase )
                if j > 0:
                    lowerCamelCase_ = self.blend_h(row[j - 1] , UpperCamelCase , UpperCamelCase )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(UpperCamelCase , dim=3 ) )
        lowerCamelCase_ = torch.cat(UpperCamelCase , dim=2 )

        lowerCamelCase_ = DiagonalGaussianDistribution(UpperCamelCase )

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=UpperCamelCase )

    def snake_case ( self , UpperCamelCase , UpperCamelCase = True ):
        """Tiled decode: decode overlapping latent tiles, cross-fade the seams,
        and concatenate into one image."""
        lowerCamelCase_ = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        lowerCamelCase_ = int(self.tile_sample_min_size * self.tile_overlap_factor )
        lowerCamelCase_ = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        lowerCamelCase_ = []
        for i in range(0 , z.shape[2] , UpperCamelCase ):
            lowerCamelCase_ = []
            for j in range(0 , z.shape[3] , UpperCamelCase ):
                lowerCamelCase_ = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                lowerCamelCase_ = self.post_quant_conv(UpperCamelCase )
                lowerCamelCase_ = self.decoder(UpperCamelCase )
                row.append(UpperCamelCase )
            rows.append(UpperCamelCase )
        lowerCamelCase_ = []
        for i, row in enumerate(UpperCamelCase ):
            lowerCamelCase_ = []
            for j, tile in enumerate(UpperCamelCase ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    lowerCamelCase_ = self.blend_v(rows[i - 1][j] , UpperCamelCase , UpperCamelCase )
                if j > 0:
                    lowerCamelCase_ = self.blend_h(row[j - 1] , UpperCamelCase , UpperCamelCase )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(UpperCamelCase , dim=3 ) )

        lowerCamelCase_ = torch.cat(UpperCamelCase , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=UpperCamelCase )

    def snake_case ( self , UpperCamelCase , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ):
        """Full forward pass: encode the sample, draw from (or take the mode of)
        the posterior, then decode back to sample space."""
        lowerCamelCase_ = sample
        lowerCamelCase_ = self.encode(UpperCamelCase ).latent_dist
        if sample_posterior:
            lowerCamelCase_ = posterior.sample(generator=UpperCamelCase )
        else:
            lowerCamelCase_ = posterior.mode()
        lowerCamelCase_ = self.decode(UpperCamelCase ).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=UpperCamelCase )
675
'''simple docstring''' import os def __snake_case ( UpperCAmelCase_ : str = "matrix.txt" ): with open(os.path.join(os.path.dirname(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) as in_file: lowerCamelCase_ = in_file.read() lowerCamelCase_ = [[int(UpperCAmelCase_ ) for cell in row.split("," )] for row in data.strip().splitlines()] lowerCamelCase_ = [[0 for cell in row] for row in grid] lowerCamelCase_ = len(grid[0] ) lowerCamelCase_ = [[0 for i in range(UpperCAmelCase_ )] for j in range(UpperCAmelCase_ )] lowerCamelCase_ = grid[0][0] for i in range(1 , UpperCAmelCase_ ): lowerCamelCase_ = grid[0][i] + dp[0][i - 1] for i in range(1 , UpperCAmelCase_ ): lowerCamelCase_ = grid[i][0] + dp[i - 1][0] for i in range(1 , UpperCAmelCase_ ): for j in range(1 , UpperCAmelCase_ ): lowerCamelCase_ = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(f'''{solution() = }''')
675
1
'''simple docstring''' from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand a_ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name def __snake_case ( UpperCAmelCase_ : str ): if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(UpperCAmelCase_ ): return ext raise Exception( F'''Unable to determine file format from file extension {path}. ''' F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' ) def __snake_case ( UpperCAmelCase_ : Optional[Any] ): lowerCamelCase_ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) lowerCamelCase_ = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format lowerCamelCase_ = PipelineDataFormat.from_str( format=UpperCAmelCase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(UpperCAmelCase_ , UpperCAmelCase_ ) class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = nlp lowerCamelCase_ = reader @staticmethod def snake_case ( UpperCamelCase ): """simple docstring""" lowerCamelCase_ = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=UpperCamelCase , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=UpperCamelCase , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=UpperCamelCase , help="Name or path to the model to instantiate." 
) run_parser.add_argument("--config" , type=UpperCamelCase , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=UpperCamelCase , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=UpperCamelCase , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=UpperCamelCase , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=UpperCamelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self._nlp, [] for entry in self._reader: lowerCamelCase_ = nlp(**UpperCamelCase ) if self._reader.is_multi_columns else nlp(UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ): outputs.append(UpperCamelCase ) else: outputs += output # Saving data if self._nlp.binary_output: lowerCamelCase_ = self._reader.save_binary(UpperCamelCase ) logger.warning(f'''Current pipeline requires output to be in binary format, saving at {binary_path}''' ) else: self._reader.save(UpperCamelCase )
675
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = inspect.getfile(accelerate.test_utils ) lowerCamelCase_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 lowerCamelCase_ = test_metrics @require_cpu def snake_case ( self ): """simple docstring""" debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def snake_case ( self ): """simple docstring""" debug_launcher(self.test_metrics.main ) @require_single_gpu def snake_case ( self ): """simple docstring""" self.test_metrics.main() @require_multi_gpu def snake_case ( self ): """simple docstring""" print(f'''Found {torch.cuda.device_count()} devices.''' ) lowerCamelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(UpperCamelCase , env=os.environ.copy() )
675
1
'''simple docstring''' import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=1024 , UpperCAmelCase_ : Tuple=1024 , UpperCAmelCase_ : List[Any]=False , **UpperCAmelCase_ : Optional[Any] ): lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) lowerCamelCase_ = SeqaSeqDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , type_path="train" , **UpperCAmelCase_ ) lowerCamelCase_ = tok.pad_token_id def get_lens(UpperCAmelCase_ : List[str] ): lowerCamelCase_ = tqdm( DataLoader(UpperCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=UpperCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) lowerCamelCase_ = [] for batch in dl: lowerCamelCase_ = batch["input_ids"].ne(UpperCAmelCase_ ).sum(1 ).tolist() lowerCamelCase_ = batch["labels"].ne(UpperCAmelCase_ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(UpperCAmelCase_ , UpperCAmelCase_ ): max_lens.append(max(UpperCAmelCase_ , UpperCAmelCase_ ) ) else: max_lens.extend(UpperCAmelCase_ ) return max_lens lowerCamelCase_ = get_lens(UpperCAmelCase_ ) lowerCamelCase_ = SeqaSeqDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , type_path="val" , **UpperCAmelCase_ ) lowerCamelCase_ = get_lens(UpperCAmelCase_ ) pickle_save(UpperCAmelCase_ , train_ds.len_file ) pickle_save(UpperCAmelCase_ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
675
'''simple docstring''' from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge a_ : Any = [ """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the""" """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe""" """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""", """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal""" """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's""" """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the""" """ body.""", """Amnesty International releases its annual report on the death penalty. The report catalogs the use of""" """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the""" """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital""" """ punishment.""", ] a_ : Optional[Any] = [ """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""" """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . 
Andreas Lubitz""" """ had informed his Lufthansa training school of an episode of severe depression, airline says .""", """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .""" """ Israel and the United States opposed the move, which could open the door to war crimes investigations against""" """ Israelis .""", """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to""" """ death . Organization claims that governments around the world are using the threat of terrorism to advance""" """ executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death""" """ sentences up by 28% .""", ] def __snake_case ( ): lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def __snake_case ( ): lowerCamelCase_ = "rougeLsum" lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k] lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k] assert score > score_no_sep def __snake_case ( ): lowerCamelCase_ = ["rouge1", "rouge2", "rougeL"] lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ ) lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ ) assert score_sep == score_no_sep def __snake_case ( ): lowerCamelCase_ = [ "Her older sister, Margot Frank, 
died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] lowerCamelCase_ = [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) def __snake_case ( ): lowerCamelCase_ = [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] lowerCamelCase_ = [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["rougeLsum"] , newline_sep=UpperCAmelCase_ )["rougeLsum"] lowerCamelCase_ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def __snake_case ( ): lowerCamelCase_ = Path("examples/seq2seq/test_data/wmt_en_ro" ) lowerCamelCase_ = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=UpperCAmelCase_ ) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
675
1
'''simple docstring''' from __future__ import annotations a_ : List[Any] = [True] * 1000001 a_ : Optional[int] = 2 while i * i <= 1000000: if seive[i]: for j in range(i * i, 1000001, i): a_ : Any = False i += 1 def __snake_case ( UpperCAmelCase_ : int ): return seive[n] def __snake_case ( UpperCAmelCase_ : int ): return any(digit in "02468" for digit in str(UpperCAmelCase_ ) ) def __snake_case ( UpperCAmelCase_ : int = 1000000 ): lowerCamelCase_ = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(UpperCAmelCase_ ) and not contains_an_even_digit(UpperCAmelCase_ ): lowerCamelCase_ = str(UpperCAmelCase_ ) lowerCamelCase_ = [int(str_num[j:] + str_num[:j] ) for j in range(len(UpperCAmelCase_ ) )] if all(is_prime(UpperCAmelCase_ ) for i in list_nums ): result.append(UpperCAmelCase_ ) return result def __snake_case ( ): return len(find_circular_primes() ) if __name__ == "__main__": print(f'''{len(find_circular_primes()) = }''')
675
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() a_ : Optional[Any] = logging.get_logger("""transformers.models.encodec""") a_ : List[str] = { """quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""", """quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""", """quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""", """quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""", } a_ : Optional[int] = { """encoder.model.0.conv.conv""": """encoder.layers.0.conv""", """encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""", """encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""", """encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""", """encoder.model.3.conv.conv""": """encoder.layers.3.conv""", """encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""", """encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""", """encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""", """encoder.model.6.conv.conv""": """encoder.layers.6.conv""", """encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""", """encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""", """encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""", """encoder.model.9.conv.conv""": """encoder.layers.9.conv""", """encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""", """encoder.model.10.block.3.conv.conv""": 
"""encoder.layers.10.block.3.conv""", """encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""", """encoder.model.12.conv.conv""": """encoder.layers.12.conv""", """encoder.model.13.lstm""": """encoder.layers.13.lstm""", """encoder.model.15.conv.conv""": """encoder.layers.15.conv""", } a_ : Tuple = { """encoder.model.0.conv.norm""": """encoder.layers.0.norm""", """encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""", """encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""", """encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""", """encoder.model.3.conv.norm""": """encoder.layers.3.norm""", """encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""", """encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""", """encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""", """encoder.model.6.conv.norm""": """encoder.layers.6.norm""", """encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""", """encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""", """encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""", """encoder.model.9.conv.norm""": """encoder.layers.9.norm""", """encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""", """encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""", """encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""", """encoder.model.12.conv.norm""": """encoder.layers.12.norm""", """encoder.model.15.conv.norm""": """encoder.layers.15.norm""", } a_ : Union[str, Any] = { """decoder.model.0.conv.conv""": """decoder.layers.0.conv""", """decoder.model.1.lstm""": """decoder.layers.1.lstm""", """decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""", """decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""", """decoder.model.4.block.3.conv.conv""": 
"""decoder.layers.4.block.3.conv""", """decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""", """decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""", """decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""", """decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""", """decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""", """decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""", """decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""", """decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""", """decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""", """decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""", """decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""", """decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""", """decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""", """decoder.model.15.conv.conv""": """decoder.layers.15.conv""", } a_ : Union[str, Any] = { """decoder.model.0.conv.norm""": """decoder.layers.0.norm""", """decoder.model.3.convtr.norm""": """decoder.layers.3.norm""", """decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""", """decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""", """decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""", """decoder.model.6.convtr.norm""": """decoder.layers.6.norm""", """decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""", """decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""", """decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""", """decoder.model.9.convtr.norm""": """decoder.layers.9.norm""", """decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""", """decoder.model.10.block.3.conv.norm""": 
"""decoder.layers.10.block.3.norm""", """decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""", """decoder.model.12.convtr.norm""": """decoder.layers.12.norm""", """decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""", """decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""", """decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""", """decoder.model.15.conv.norm""": """decoder.layers.15.norm""", } a_ : Optional[Any] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } a_ : List[str] = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } a_ : Any = [] a_ : str = [] def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ): for attribute in key.split("." ): lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) if weight_type is not None: lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape else: lowerCamelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase_ = value elif weight_type == "weight_g": lowerCamelCase_ = value elif weight_type == "weight_v": lowerCamelCase_ = value elif weight_type == "bias": lowerCamelCase_ = value elif weight_type == "running_mean": lowerCamelCase_ = value elif weight_type == "running_var": lowerCamelCase_ = value elif weight_type == "num_batches_tracked": lowerCamelCase_ = value elif weight_type == "weight_ih_l0": lowerCamelCase_ = value elif weight_type == "weight_hh_l0": lowerCamelCase_ = value elif weight_type == "bias_ih_l0": lowerCamelCase_ = value elif weight_type == "bias_hh_l0": lowerCamelCase_ = value elif weight_type == "weight_ih_l1": lowerCamelCase_ = value elif weight_type == "weight_hh_l1": lowerCamelCase_ = value elif weight_type == "bias_ih_l1": lowerCamelCase_ = value elif weight_type == "bias_hh_l1": lowerCamelCase_ = value else: lowerCamelCase_ = value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." 
) if prefix in name and suffix in name: return True elif key in name: return True return False def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ): lowerCamelCase_ = [] if model_name == "encodec_24khz" or "encodec_32khz": lowerCamelCase_ = MAPPING_24K elif model_name == "encodec_48khz": lowerCamelCase_ = MAPPING_48K else: raise ValueError(F'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(UpperCAmelCase_ , UpperCAmelCase_ ): logger.info(F'''{name} was ignored''' ) continue lowerCamelCase_ = False for key, mapped_key in MAPPING.items(): if "*" in key: lowerCamelCase_ ,lowerCamelCase_ = key.split(".*." ) if prefix in name and suffix in name: lowerCamelCase_ = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith("embed" ) and name.endswith("embed_avg" ): continue lowerCamelCase_ = True if "*" in mapped_key: lowerCamelCase_ = name.split(UpperCAmelCase_ )[0].split("." 
)[-2] lowerCamelCase_ = mapped_key.replace("*" , UpperCAmelCase_ ) if "weight_g" in name: lowerCamelCase_ = "weight_g" elif "weight_v" in name: lowerCamelCase_ = "weight_v" elif "weight_ih_l0" in name: lowerCamelCase_ = "weight_ih_l0" elif "weight_hh_l0" in name: lowerCamelCase_ = "weight_hh_l0" elif "bias_ih_l0" in name: lowerCamelCase_ = "bias_ih_l0" elif "bias_hh_l0" in name: lowerCamelCase_ = "bias_hh_l0" elif "weight_ih_l1" in name: lowerCamelCase_ = "weight_ih_l1" elif "weight_hh_l1" in name: lowerCamelCase_ = "weight_hh_l1" elif "bias_ih_l1" in name: lowerCamelCase_ = "bias_ih_l1" elif "bias_hh_l1" in name: lowerCamelCase_ = "bias_hh_l1" elif "bias" in name: lowerCamelCase_ = "bias" elif "weight" in name: lowerCamelCase_ = "weight" elif "running_mean" in name: lowerCamelCase_ = "running_mean" elif "running_var" in name: lowerCamelCase_ = "running_var" elif "num_batches_tracked" in name: lowerCamelCase_ = "num_batches_tracked" else: lowerCamelCase_ = None set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) continue if not is_used: unused_weights.append(UpperCAmelCase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) @torch.no_grad() def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[int]=None , ): if config_path is not None: lowerCamelCase_ = EncodecConfig.from_pretrained(UpperCAmelCase_ ) else: lowerCamelCase_ = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowerCamelCase_ = [8, 5, 4, 4] lowerCamelCase_ = [2.2] lowerCamelCase_ = 64 lowerCamelCase_ = 32000 lowerCamelCase_ = 2048 lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False elif model_name == "encodec_48khz": lowerCamelCase_ = [8, 5, 4, 2] lowerCamelCase_ = [3.0, 6.0, 12.0, 24.0] lowerCamelCase_ = 48000 lowerCamelCase_ = 2 lowerCamelCase_ 
= False lowerCamelCase_ = "time_group_norm" lowerCamelCase_ = True lowerCamelCase_ = 1.0 lowerCamelCase_ = 0.01 else: raise ValueError(F'''Unknown model name: {model_name}''' ) lowerCamelCase_ = EncodecModel(UpperCAmelCase_ ) lowerCamelCase_ = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(UpperCAmelCase_ ) lowerCamelCase_ = torch.load(UpperCAmelCase_ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowerCamelCase_ = original_checkpoint["best_state"] recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) model.save_pretrained(UpperCAmelCase_ ) if repo_id: print("Pushing to the hub..." ) feature_extractor.push_to_hub(UpperCAmelCase_ ) model.push_to_hub(UpperCAmelCase_ ) if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() parser.add_argument( """--model""", default="""encodec_24khz""", type=str, help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) a_ : str = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
675
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = ["image_processor", "tokenizer"] _lowerCamelCase = "LayoutLMv3ImageProcessor" _lowerCamelCase = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCamelCase , ) lowerCamelCase_ = kwargs.pop("feature_extractor" ) lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = 0 , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , **UpperCamelCase , ): """simple docstring""" # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." 
) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." ) # first, apply the image processor lowerCamelCase_ = self.image_processor(images=UpperCamelCase , return_tensors=UpperCamelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ = features["words"] lowerCamelCase_ = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) # add pixel values lowerCamelCase_ = features.pop("pixel_values" ) if return_overflowing_tokens is True: lowerCamelCase_ = self.get_overflowing_images(UpperCamelCase , encoded_inputs["overflow_to_sample_mapping"] ) lowerCamelCase_ = images return encoded_inputs def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image lowerCamelCase_ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( "Expected length of images to be the 
same as the length of `overflow_to_sample_mapping`, but got" f''' {len(UpperCamelCase )} and {len(UpperCamelCase )}''' ) return images_with_overflow def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def snake_case ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def snake_case ( self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase , ) return self.image_processor_class @property def snake_case ( self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , ) return self.image_processor
675
'''simple docstring''' from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = "arrow" , **UpperCamelCase , ): """simple docstring""" super().__init__( split=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , streaming=UpperCamelCase , **UpperCamelCase , ) lowerCamelCase_ = load_from_cache_file lowerCamelCase_ = file_format lowerCamelCase_ = Spark( df=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , working_dir=UpperCamelCase , **UpperCamelCase , ) def snake_case ( self ): """simple docstring""" if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCamelCase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCamelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
675
1
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = 13 lowerCamelCase_ = 7 lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = 99 lowerCamelCase_ = 384 lowerCamelCase_ = 2 lowerCamelCase_ = 4 lowerCamelCase_ = 37 lowerCamelCase_ = "gelu" lowerCamelCase_ = 0.1 lowerCamelCase_ = 0.1 lowerCamelCase_ = 512 lowerCamelCase_ = 16 lowerCamelCase_ = 2 lowerCamelCase_ = 0.02 lowerCamelCase_ = 3 lowerCamelCase_ = 4 lowerCamelCase_ = 128 lowerCamelCase_ = 2 lowerCamelCase_ = 9 lowerCamelCase_ = 1 lowerCamelCase_ = None def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, 
self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFConvBertModel(config=UpperCamelCase ) lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCamelCase_ = [input_ids, input_mask] lowerCamelCase_ = model(UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFConvBertForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = { "input_ids": input_ids, "attention_mask": input_mask, 
"token_type_ids": token_type_ids, } lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFConvBertForSequenceClassification(config=UpperCamelCase ) lowerCamelCase_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_choices lowerCamelCase_ = TFConvBertForMultipleChoice(config=UpperCamelCase ) lowerCamelCase_ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase_ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase_ = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) lowerCamelCase_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFConvBertForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFConvBertForQuestionAnswering(config=UpperCamelCase ) lowerCamelCase_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFConvBertModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple 
docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True lowerCamelCase_ = True if hasattr(UpperCamelCase , "use_cache" ): lowerCamelCase_ = True lowerCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase ) for model_class in self.all_model_classes: lowerCamelCase_ = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = len(model(UpperCamelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase , saved_model=UpperCamelCase ) lowerCamelCase_ = os.path.join(UpperCamelCase , "saved_model" , "1" ) 
lowerCamelCase_ = tf.keras.models.load_model(UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase ) if self.is_encoder_decoder: lowerCamelCase_ = outputs["encoder_hidden_states"] lowerCamelCase_ = outputs["encoder_attentions"] else: lowerCamelCase_ = outputs["hidden_states"] lowerCamelCase_ = outputs["attentions"] self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) lowerCamelCase_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = True lowerCamelCase_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) lowerCamelCase_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase ) lowerCamelCase_ = getattr(self.model_tester , "key_length" , UpperCamelCase ) def check_decoder_attentions_output(UpperCamelCase ): lowerCamelCase_ = len(UpperCamelCase ) self.assertEqual(out_len % 2 , 0 ) lowerCamelCase_ = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, 
decoder_key_length] , ) def check_encoder_attentions_output(UpperCamelCase ): lowerCamelCase_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) lowerCamelCase_ = len(UpperCamelCase ) self.assertEqual(config.output_hidden_states , UpperCamelCase ) check_encoder_attentions_output(UpperCamelCase ) if self.is_encoder_decoder: lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase ) check_decoder_attentions_output(UpperCamelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCamelCase_ = True lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase ) check_encoder_attentions_output(UpperCamelCase ) # Check attention is always last and order is fine lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = model(self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCamelCase ) check_encoder_attentions_output(UpperCamelCase ) @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple 
docstring""" lowerCamelCase_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ = model(UpperCamelCase )[0] lowerCamelCase_ = [1, 6, 768] self.assertEqual(output.shape , UpperCamelCase ) lowerCamelCase_ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase , atol=1e-4 )
675
"""Project Euler Problem 19: count Sundays falling on the first of the month.

How many Sundays fell on the first of the month during the twentieth
century (1 Jan 1901 to 31 Dec 2000)?
"""


def solution() -> int:
    """Return the number of first-of-month Sundays between 1901 and 2000.

    Walks the calendar week by week starting from the first Sunday of 1901
    (6 Jan 1901), rolling the day counter over month and year boundaries
    with Gregorian leap-year handling.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7  # jump one week: every visited date is a Sunday

        # Gregorian rule: leap years divisible by 4, except centuries not by 400.
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
675
1
"""Tests for the Kandinsky inpainting pipeline (reconstructed from corrupted source)."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests using tiny randomly-initialized pipeline components."""

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full tiny pipeline component dict."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Random-but-seeded embeddings, init image and mask for one call."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        # NOTE(review): region zeroed here was lost in the corrupted source;
        # upstream masks the top-left quadrant — confirm against the repo.
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the real released checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # NOTE(review): masked band reconstructed from upstream — confirm.
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
675
"""Deformable DETR model configuration (reconstructed from corrupted source)."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """Configuration for a Deformable DETR model.

    Holds backbone selection (timm vs. transformers config), transformer
    encoder/decoder sizes, deformable-attention settings, and the Hungarian
    matcher / loss coefficients.
    """

    model_type = "deformable_detr"
    # Generic names used by the base class mapped onto DETR-specific fields.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        # A timm backbone and an explicit transformers backbone config are
        # mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a serialized backbone config into its config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
675
1
"""Scrape Amazon.in search results for a product into a pandas DataFrame."""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Return a DataFrame of title/link/price/rating/MRP/discount per result.

    Performs a live HTTP request to Amazon.in, so results depend on the
    network and Amazon's current page markup.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹" + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # Malformed result card (e.g. ad slot) — skip the row entirely so
            # we never append stale values from a previous iteration.
            continue
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
    # Make the row index 1-based for nicer CSV output.
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
675
"""Convert a PyTorch-Lightning Longformer QA checkpoint to a Transformers model."""
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper matching the checkpoint's parameter layout.

    Only the attribute names (`model`, `qa_outputs`) matter — they must line
    up with the keys stored in the Lightning checkpoint's ``state_dict``.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # Implemented only because Lightning requires it; never called here.
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Load the Lightning checkpoint and re-save it as a Transformers model.

    Args:
        longformer_model: Model identifier of the base Longformer.
        longformer_question_answering_ckpt_path: Path to the Lightning .ckpt.
        pytorch_dump_folder_path: Output directory for ``save_pretrained``.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
675
1
'''simple docstring''' from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup a_ : Optional[int] = """https://www.indeed.co.in/jobs?q=mobile+app+development&l=""" def __snake_case ( UpperCAmelCase_ : str = "mumbai" ): lowerCamelCase_ = BeautifulSoup(requests.get(url + location ).content , "html.parser" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ): lowerCamelCase_ = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip() lowerCamelCase_ = job.find("span" , {"class": "company"} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs("""Bangalore"""), 1): print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
675
"""Lazy import structure for the CTRL model (config, tokenizer, PyTorch and TF backends)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only advertise the PyTorch symbols when torch is importable.
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only advertise the TensorFlow symbols when TF is importable.
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they stay lazy.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
675
1
# NOTE(review): TensorFlow test suite for the ESM protein-language models: a
# model tester that builds configs/inputs, a common-test mixin subclass, and
# slow integration checks against facebook/esm2_t6_8M_UR50D.
# NOTE(review): many `def` signatures below repeat the parameter name
# `UpperCamelCase` (e.g. `def snake_case(self, UpperCamelCase, UpperCamelCase, ...)`),
# which is a SyntaxError in Python ("duplicate argument in function definition") —
# presumably an artifact of mechanical identifier renaming. The parameters need
# distinct names (config, input_ids, input_mask, ...) before this file can run;
# TODO confirm against upstream tests/models/esm/test_modeling_tf_esm.py.
# NOTE(review): the class name `snake_case` is reused for three different
# classes here (tester, common-test case, integration case) — also an artifact;
# later definitions shadow earlier ones at module scope.
'''simple docstring''' from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = 13 lowerCamelCase_ = 7 lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = 99 lowerCamelCase_ = 32 lowerCamelCase_ = 2 lowerCamelCase_ = 4 lowerCamelCase_ = 37 lowerCamelCase_ = "gelu" lowerCamelCase_ = 0.1 lowerCamelCase_ = 0.1 lowerCamelCase_ = 512 lowerCamelCase_ = 16 lowerCamelCase_ = 2 lowerCamelCase_ = 0.02 lowerCamelCase_ = 3 lowerCamelCase_ = 4 lowerCamelCase_ = None def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): """simple docstring""" ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = self.prepare_config_and_inputs() lowerCamelCase_ = True lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFEsmModel(config=UpperCamelCase ) lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase_ = model(UpperCamelCase ) lowerCamelCase_ = [input_ids, input_mask] lowerCamelCase_ = model(UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ): """simple docstring""" lowerCamelCase_ = True lowerCamelCase_ = TFEsmModel(config=UpperCamelCase ) lowerCamelCase_ = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } lowerCamelCase_ = model(UpperCamelCase ) lowerCamelCase_ = 
[input_ids, input_mask] lowerCamelCase_ = model(UpperCamelCase , encoder_hidden_states=UpperCamelCase ) # Also check the case where encoder outputs are not passed lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFEsmForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFEsmForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFEsmModel, "fill-mask": TFEsmForMaskedLM, "text-classification": TFEsmForSequenceClassification, "token-classification": TFEsmForTokenClassification, "zero-shot": 
TFEsmForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEsmModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFEsmModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip("Protein models do not support embedding resizing." ) def snake_case ( self ): """simple docstring""" pass @unittest.skip("Protein models do not support embedding resizing." 
) def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowerCamelCase_ = model.get_bias() assert isinstance(UpperCamelCase , UpperCamelCase ) for k, v in name.items(): assert isinstance(UpperCamelCase , tf.Variable ) else: lowerCamelCase_ = model.get_output_embeddings() assert x is None lowerCamelCase_ = model.get_bias() assert name is None @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase_ = model(UpperCamelCase )[0] lowerCamelCase_ = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , UpperCamelCase ) # compare the actual values for a slice. lowerCamelCase_ = tf.constant( [ [ [8.921_518, -10.589_814, -6.4_671_307], [-6.3_967_156, -13.911_377, -1.1_211_915], [-7.781_247, -13.951_557, -3.740_592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase_ = model(UpperCamelCase )[0] # compare the actual values for a slice. 
lowerCamelCase_ = tf.constant( [ [ [0.14_443_092, 0.54_125_327, 0.3_247_739], [0.30_340_484, 0.00_526_676, 0.31_077_722], [0.32_278_043, -0.24_987_096, 0.3_414_628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
675
# NOTE(review): tokenizer test suite for GPT-SW3 (SentencePiece with byte
# fallback): vocab checks, tokenize/convert round-trips, encode_fast /
# decode_fast equivalence, and a slow integration test against
# AI-Sweden/gpt-sw3-126m.
# NOTE(review): `GPTSwaTokenizer(UpperCamelCase , ...)` is called with an
# argument name that is not in scope here — upstream this is the SAMPLE_VOCAB
# fixture path bound at module level; presumably the module-level constant
# `a_` (the get_tests_dir(...) result) was meant. TODO confirm against
# upstream tests/models/gpt_sw3/test_tokenization_gpt_sw3.py.
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class snake_case ( lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = GPTSwaTokenizer _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = False def snake_case ( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "This is a test" lowerCamelCase_ = "This is a test" return input_text, output_text def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "<s>" lowerCamelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(UpperCamelCase ) , 2000 ) def snake_case ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase ) lowerCamelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [465, 287, 265, 631, 842] ) lowerCamelCase_ 
= tokenizer.tokenize("I was born in 92000, and this is falsé." ) # fmt: off self.assertListEqual( UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) # fmt: off self.assertListEqual( UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def snake_case ( self ): """simple docstring""" lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase ) lowerCamelCase_ = ["This is a test", "I was born in 92000, and this is falsé."] lowerCamelCase_ = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ): self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. 
Cool", ] # fmt: off lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCamelCase , )
675
1
"""Project Euler problem 8: find the thirteen adjacent digits in the
1000-digit number ``N`` that have the greatest product, and return that
product (https://projecteuler.net/problem=8)."""
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits of ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the largest product of 13 adjacent digits in ``n``.

    Slides a 13-character window; windows are only re-evaluated when the
    incoming digit is smaller than the window's leading digit, which prunes
    windows that cannot improve the running maximum.
    """
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # Incoming digit is at least as large as the one leaving: shift the window.
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            # Window can only get worse from here; score it and jump ahead.
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
675
# NOTE(review): processor for OWL-ViT open-vocabulary detection. Wraps a CLIP
# tokenizer and an OwlViT image processor; __call__ accepts text queries,
# query images, and/or images, pads per-sample text queries to the batch-wide
# maximum, and concatenates encodings for np / jax / pt / tf tensor types.
# NOTE(review): `def __call__(self, UpperCamelCase=None, UpperCamelCase=None, ...)`
# repeats the parameter name — a SyntaxError ("duplicate argument in function
# definition") from mechanical renaming; upstream the parameters are
# (text, query_images, images, padding, return_tensors). Likewise the several
# `def snake_case(self, *UpperCamelCase, **UpperCamelCase)` wrappers duplicate
# the vararg name. TODO confirm against upstream
# transformers/models/owlvit/processing_owlvit.py before relying on this file.
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class snake_case ( lowercase ): """simple docstring""" _lowerCamelCase = ["image_processor", "tokenizer"] _lowerCamelCase = "OwlViTImageProcessor" _lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCamelCase , ) lowerCamelCase_ = kwargs.pop("feature_extractor" ) lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(UpperCamelCase , UpperCamelCase ) def __call__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="max_length" , UpperCamelCase="np" , **UpperCamelCase ): """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." 
) if text is not None: if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )): lowerCamelCase_ = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )] elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ): lowerCamelCase_ = [] # Maximum number of queries across batch lowerCamelCase_ = max([len(UpperCamelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(UpperCamelCase ) != max_num_queries: lowerCamelCase_ = t + [" "] * (max_num_queries - len(UpperCamelCase )) lowerCamelCase_ = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) encodings.append(UpperCamelCase ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": lowerCamelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp lowerCamelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch lowerCamelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) lowerCamelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf lowerCamelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) lowerCamelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return 
tensor type could not be returned" ) lowerCamelCase_ = BatchEncoding() lowerCamelCase_ = input_ids lowerCamelCase_ = attention_mask if query_images is not None: lowerCamelCase_ = BatchEncoding() lowerCamelCase_ = self.image_processor( UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values lowerCamelCase_ = query_pixel_values if images is not None: lowerCamelCase_ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: lowerCamelCase_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: lowerCamelCase_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def snake_case ( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def snake_case ( self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." 
, UpperCamelCase , ) return self.image_processor_class @property def snake_case ( self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , ) return self.image_processor
675
1
"""Generate the sequence of hexagonal numbers h(n) = n * (2n - 1)."""


def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, starting at h(0) = 0.

    Args:
        length: how many terms to generate; must be a positive integer.

    Raises:
        ValueError: if ``length`` is not a positive integer.
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
675
# NOTE(review): unit tests for the repo maintenance script `check_dummies`:
# backend detection from `is_*_available()` guard lines, reading the init's
# backend-specific objects, and generating dummy constant/function/class stubs.
# NOTE(review): `self.assertEqual(UpperCamelCase, "tokenizers")`-style calls
# reference a name not in scope — upstream each result is bound to a local
# (e.g. `backend = find_backend(...)`); the scrambled single-name binding
# `lowerCamelCase_` loses that. TODO confirm against upstream
# tests/repo_utils/test_check_dummies.py.
'''simple docstring''' import os import sys import unittest a_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a_ : Tuple = os.path.join(git_repo_path, """src""", """transformers""") a_ : List[Any] = """ {0} = None """ a_ : Optional[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ a_ : str = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" ) self.assertIsNone(UpperCamelCase ) lowerCamelCase_ = find_backend(" if not is_tokenizers_available():" ) self.assertEqual(UpperCamelCase , "tokenizers" ) lowerCamelCase_ = find_backend(" if not is_tensorflow_text_available():" ) self.assertEqual(UpperCamelCase , "tensorflow_text" ) lowerCamelCase_ = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers" ) lowerCamelCase_ = find_backend( " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tensorflow_text" ) lowerCamelCase_ = find_backend( " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" ) self.assertEqual(UpperCamelCase , "sentencepiece_and_tokenizers_and_vision" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects 
self.assertIn("torch" , UpperCamelCase ) self.assertIn("tensorflow_text" , UpperCamelCase ) self.assertIn("sentencepiece_and_tokenizers" , UpperCamelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertModel" , objects["tf"] ) self.assertIn("FlaxBertModel" , objects["flax"] ) self.assertIn("BertModel" , objects["torch"] ) self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] ) self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = create_dummy_object("CONSTANT" , "'torch'" ) self.assertEqual(UpperCamelCase , "\nCONSTANT = None\n" ) lowerCamelCase_ = create_dummy_object("function" , "'torch'" ) self.assertEqual( UpperCamelCase , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" ) lowerCamelCase_ = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n" lowerCamelCase_ = create_dummy_object("FakeClass" , "'torch'" ) self.assertEqual(UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n" lowerCamelCase_ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} ) self.assertEqual(dummy_files["torch"] , UpperCamelCase )
675
1
"""Ternary search over a sorted list: iterative and recursive variants with a
linear-scan fallback for small ranges."""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of ``array[left:right]``; return the index of ``target`` or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iteratively ternary-search sorted ``array``; return an index of ``target`` or -1.

    Bug fix: the third points were previously computed as ``(left + right) // 3 + 1``,
    i.e. thirds of ``[0, right]`` rather than of the current ``[left, right]`` range,
    which indexes out of bounds (or loops over the wrong region) once ``left`` grows
    on arrays larger than ``precision``. The split points are now taken inside the
    active range.
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # Small ranges are cheaper to scan linearly.
            return lin_search(left, right + 1, array, target)

        # Split the *current* range [left, right] into thirds.
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursively ternary-search ``array[left:right + 1]``; return an index or -1.

    Same in-range split-point fix as :func:`ite_ternary_search`.
    """
    if left > right:
        return -1
    if right - left < precision:
        return lin_search(left, right + 1, array, target)

    one_third = left + (right - left) // 3
    two_third = right - (right - left) // 3

    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
675
"""Dummy placeholder raising an informative error when the `onnx` backend is missing."""
from ..utils import DummyObject, requires_backends


class snake_case(metaclass=lowercase):
    """Stand-in for an onnx-backed class: every entry point checks the backend.

    Fix: the original signatures were ``(*UpperCamelCase, **UpperCamelCase)``,
    which duplicates the vararg name and is a SyntaxError; the varargs are now
    distinctly named. Method names are left as-is to preserve the interface.
    """

    # Backends this dummy guards on; checked by requires_backends.
    _lowerCamelCase = ["onnx"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError-style message if `onnx` cannot be imported.
        requires_backends(self, ["onnx"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def snake_case(cls, *args, **kwargs):  # noqa: F811 -- second stub shadows the first, as in the original
        requires_backends(cls, ["onnx"])
675
1
"""Project Euler problem 19: how many Sundays fell on the first of the month
during the twentieth century (1 Jan 1901 to 31 Dec 2000)?

1 Jan 1901 was a Tuesday, so the first Sunday of the period is 6 Jan 1901;
the loop below walks forward one week at a time.
"""


def solution() -> int:
    """Return the number of months in 1901-2000 that started on a Sunday."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 is the first Sunday in range
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7  # advance to the next Sunday
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days, every other month is normal.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
675
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=1000 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope 
lowerCamelCase_ = range_bbox def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ = bbox[i, j, 3] lowerCamelCase_ = bbox[i, j, 1] lowerCamelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ = bbox[i, j, 2] lowerCamelCase_ = bbox[i, j, 0] lowerCamelCase_ = t lowerCamelCase_ = tf.convert_to_tensor(UpperCamelCase ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForSequenceClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase 
, token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForQuestionAnswering(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = 10 def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) 
def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFLayoutLMModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def snake_case ( self ): """simple docstring""" pass def __snake_case ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCamelCase_ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCamelCase_ = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) lowerCamelCase_ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the sequence output on [0, :3, :3] lowerCamelCase_ = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCamelCase_ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase , atol=1e-3 ) ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized sequence classification head lowerCamelCase_ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCamelCase_ = outputs.loss lowerCamelCase_ = (2,) self.assertEqual(loss.shape , UpperCamelCase ) # test the shape 
of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = (2, 2) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , UpperCamelCase ) self.assertEqual(outputs.end_logits.shape , UpperCamelCase )
675
1
"""Evaluate a fully parenthesised infix arithmetic expression using
Dijkstra's two-stack algorithm (one stack of operands, one of operators)."""

__author__ = "Alexander Joslin"  # NOTE(review): constant name reconstructed (was mangled)

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate ``equation`` (e.g. ``"(5 + ((4 * 2) * (2 + 3)))"``).

    Only single-digit operands and the operators ``+ - * /`` are supported;
    every binary operation must be wrapped in parentheses.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis triggers one reduction
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # NOTE(review): argument order matters for - and /; the
            # earlier-pushed operand goes first — confirm against .stack usage.
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the remaining operand is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
675
"""Check (and optionally fix) the alphabetical ordering of the Schedulers and
Pipelines sections of ``docs/source/en/_toctree.yml``."""
import argparse
from collections import defaultdict

import yaml


# NOTE(review): constant name reconstructed from convention (was mangled).
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Deduplicate and alphabetically sort ``doc_list`` (a toctree section),
    keeping any "Overview" page first.

    Raises ValueError when a duplicated `local` has conflicting titles, or
    when more than one overview page is present.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Verify the Schedulers section is sorted; rewrite the file when
    ``overwrite`` is True, otherwise raise on a mismatch."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Verify the Pipelines section (including sub-sections) is sorted;
    rewrite the file when ``overwrite`` is True, otherwise raise."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
675
1
"""Slow integration test: checks google/mt5-small's loss-derived score against
a reference value."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    # NOTE(review): class/method names reconstructed from convention (they
    # were mangled); digit-mangled `AutoModelForSeqaSeqLM` restored to the
    # real transformers name `AutoModelForSeq2SeqLM`.

    @slow
    def test_small_integration_test(self):
        """Reproduce the mtf-style score of mt5-small on a tiny example and
        compare with the reference value (tolerance 1e-4)."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Score is negative total log-likelihood (loss is mean per token).
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
675
"""Precompute and pickle per-example token lengths for a seq2seq dataset so
dynamic batching can read them cheaply."""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

# NOTE(review): digit-mangled `SeqaSeqDataset` restored to `Seq2SeqDataset`
# — confirm against the local `utils` module.
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Compute token lengths for the train and val splits and save them with
    ``pickle_save`` to each dataset's ``len_file``.

    When ``consider_target`` is True, store max(source_len, target_len) per
    example; otherwise store only the source length.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # One pass over the dataset counting non-pad tokens per example.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
675
1
"""Fine-tune (or train from scratch) a library model for language modeling
(GPT/GPT-2 with CLM, BERT/RoBERTa/DistilBERT with MLM, XLNet with PLM).

NOTE(review): identifiers in this file were machine-mangled; names below are
reconstructed from their read sites in ``main()`` (e.g.
``model_args.model_name_or_path``, ``data_args.mlm``).  Defaults collapsed to
a placeholder were restored to None/False per the help strings — confirm.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and eval."""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset requested by ``args`` (line-by-line,
    ref-file whole-word-mask, plain text, or a glob of training files)."""

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        # NOTE(review): attribute was digit-mangled to `fpaa`; TrainingArguments
        # exposes `fp16`.
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
675
"""Keyword (substitution) cipher: builds a cipher alphabet from a keyword."""


def remove_duplicates(key: str) -> str:
    """Return *key* with duplicate letters removed (spaces are kept as-is).

    >>> remove_duplicates('Hello World!!')
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        # Keep spaces verbatim; keep a letter only on its first occurrence.
        # Non-alphabetic, non-space characters are dropped.
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Return a plain->cipher letter map built from *key*.

    The deduplicated keyword fills the first positions of the cipher
    alphabet; the remaining letters follow in alphabetical order, skipping
    letters already used by the keyword.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encode *message* using *cipher_map*; unmapped characters pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decode *message* by inverting *cipher_map*."""
    # Reverse the cipher map so cipher letters look up their plain letters.
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive entry point: prompt for message, keyword, and mode."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
675
1
"""Marian model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    """Configuration for a Marian encoder-decoder translation model.

    Defaults reproduce the ``Helsinki-NLP/opus-mt-en-de`` architecture.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # The decoder may use its own vocabulary; fall back to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for Marian (mirrors the BART ONNX config)."""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and past key values if needed)."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs (length 1 when caching is used)
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs (and past key values if needed)."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation based on the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Seq2seq tasks use the 4-tuple flattening of the seq2seq base class;
        # causal-lm falls back to the 2-tuple flattening of OnnxConfigWithPast.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating ONNX export outputs against PyTorch.
        return 1e-4
675
"""Tests for the slow and fast OpenAI GPT tokenizers."""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """BPE tokenization tests using a tiny hand-built vocabulary."""

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # (input text, expected decoded text) pair used by the common tests.
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        # The OpenAI GPT tokenizer defines no pad token, so every padded
        # encode call must raise ValueError.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                sentence = "This is a simple input"
                sentences = ["This is a simple input 1", "This is a simple input 2"]
                pair = ("This is a simple input", "This is a pair")
                pairs = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, sentence, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.encode_plus, sentence, max_length=max_length, padding="max_length"
                )

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    sentences,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, pair, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.encode_plus, pair, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    pairs,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # No pad token, so the common padding test does not apply.
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same tests, run with ftfy + spaCy preprocessing available."""

    pass
675
1
"""Kruskal's minimum-spanning-tree algorithm over a disjoint-set forest."""
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """A node of a disjoint-set (union-find) tree."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # each node starts as its own set representative
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set forest with union-by-rank and path compression."""

    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(
        self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]
    ) -> None:
        # helper for union: attach the lower-rank root under the higher-rank root
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph supporting Kruskal MST generation."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight (stored in both directions)
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return the minimum spanning tree as a new graph."""
        # Collect each undirected edge exactly once.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take the cheapest edge that joins two
        # different components, until all nodes are connected.
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
675
"""Fast RoFormer tokenizer using a custom Jieba pre-tokenizer."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer (backed by HuggingFace *tokenizers*) that
    replaces the default pre-tokenizer with a Jieba-based one for Chinese
    word segmentation."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the normalizer if the requested casing/accent handling
        # differs from what the serialized backend tokenizer was saved with.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Install the custom Jieba pre-tokenizer (not serializable, see
        # __getstate__/__setstate__ and save_pretrained below).
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom PreTokenizer cannot be pickled; swap in a standard one.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Restore the custom Jieba pre-tokenizer after unpickling.
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # [CLS] A [SEP] or [CLS] A [SEP] B [SEP]
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # The custom PreTokenizer cannot be serialized; temporarily replace it
        # with a standard one for saving.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
675
1
"""Flax logits processors and warpers used during generation."""
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""


class FlaxLogitsProcessor:
    """Abstract base class for all logit processors applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """A list of processors/warpers applied in order to the scores."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            # Processors taking extra arguments must receive them via kwargs.
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Warper that rescales the score distribution by a temperature."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Warper that keeps the smallest token set whose cumulative probability >= top_p."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Warper that keeps only the top-k highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # Scatter the kept scores back into a flat (batch*vocab) buffer.
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the first generated token to be `bos_token_id`."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        # Only active at the very first generation step (cur_len == 1).
        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces `eos_token_id` when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        # Only active at the last step before max_length.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses EOS until a minimum sequence length is reached."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses the given tokens as long as generation is at `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Unconditionally suppresses the given list of tokens."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Constrains Whisper generation to emit well-formed timestamp tokens."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            # Timestamps must come in pairs; after one timestamp either a
            # second timestamp or ordinary text must follow.
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    # has to be non-timestamp
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    # cannot be normal text tokens
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
675
'''Tests for UperNet semantic segmentation (ConvNext backbone).

NOTE(review): obfuscated source -- every test method is named `snake_case`, so
each later `def snake_case` silently replaces the previous one and only the
last definition survives; assignment targets were rewritten to
`lowerCamelCase_` (presumably `self.<attr> = ...` / locals named as in the
reads below); base classes were mangled to `lowercase`.  Cannot run as
written; restore names from upstream before use.
'''
import inspect
import unittest

from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class snake_case :
    """Helper that builds tiny UperNet configs/inputs for the common tests
    (presumably `UperNetModelTester`; referenced under that name below)."""

    def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=[10, 20, 30, 40] , UpperCamelCase=[2, 2, 3, 2] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=3 , UpperCamelCase=None , ):
        """Stores the tiny-model hyper-parameters (targets mangled; the reads
        elsewhere show the intended attribute names)."""
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = image_size
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = num_stages
        lowerCamelCase_ = hidden_sizes
        lowerCamelCase_ = depths
        lowerCamelCase_ = is_training
        lowerCamelCase_ = use_labels
        lowerCamelCase_ = intermediate_size
        lowerCamelCase_ = hidden_act
        lowerCamelCase_ = type_sequence_label_size
        lowerCamelCase_ = initializer_range
        lowerCamelCase_ = out_features
        lowerCamelCase_ = num_labels
        lowerCamelCase_ = scope
        lowerCamelCase_ = num_stages

    def snake_case ( self ):
        """Builds (config, pixel_values, labels) for a tiny model."""
        lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase_ = None
        if self.use_labels:
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase_ = self.get_config()
        return config, pixel_values, labels

    def snake_case ( self ):
        """Tiny ConvNext backbone config."""
        return ConvNextConfig(
            num_channels=self.num_channels ,
            num_stages=self.num_stages ,
            hidden_sizes=self.hidden_sizes ,
            depths=self.depths ,
            is_training=self.is_training ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            out_features=self.out_features ,
        )

    def snake_case ( self ):
        """UperNet config wrapping the backbone config above.
        NOTE(review): boolean args were mangled to `UpperCamelCase`."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() ,
            hidden_size=512 ,
            pool_scales=[1, 2, 3, 6] ,
            use_auxiliary_head=UpperCamelCase ,
            auxiliary_loss_weight=0.4 ,
            auxiliary_in_channels=40 ,
            auxiliary_channels=256 ,
            auxiliary_num_convs=1 ,
            auxiliary_concat_input=UpperCamelCase ,
            loss_ignore_index=255 ,
            num_labels=self.num_labels ,
        )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Instantiates the model and checks the logits shape."""
        lowerCamelCase_ = UperNetForSemanticSegmentation(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def snake_case ( self ):
        """Returns (config, inputs_dict) for the common tests."""
        lowerCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) , (
                lowerCamelCase_
            ) ,
        ) = config_and_inputs
        lowerCamelCase_ = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
    """Common model tests for UperNet (bases presumably ModelTesterMixin,
    PipelineTesterMixin).  NOTE(review): the repeated `_lowerCamelCase` class
    attributes shadow each other -- only the last assignment survives."""

    _lowerCamelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    _lowerCamelCase = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    def snake_case ( self ):
        """setUp-style: build the model tester + config tester."""
        lowerCamelCase_ = UperNetModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )

    def snake_case ( self ):
        """Standard config round-trip / serialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case ( self ):
        """Intentional no-op placeholder."""
        return

    def snake_case ( self ):
        """Checks the forward signature starts with `pixel_values`."""
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCamelCase )
            lowerCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ = [*signature.parameters.keys()]
            lowerCamelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase )

    def snake_case ( self ):
        """Runs the semantic-segmentation forward check of the tester."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase )

    @unittest.skip(reason="UperNet does not use inputs_embeds" )
    def snake_case ( self ):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings" )
    def snake_case ( self ):
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def snake_case ( self ):
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def snake_case ( self ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def snake_case ( self ):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def snake_case ( self ):
        pass

    def snake_case ( self ):
        """Checks hidden-state count and the spatial shape of the first feature map."""

        def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
            lowerCamelCase_ = model_class(UpperCamelCase )
            model.to(UpperCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
            lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase_ = self.model_tester.num_stages
            self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,
            )

        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True
            check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """Checks zero-init'ed parameters are exactly 0.0 or 1.0."""
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = _config_zero_init(UpperCamelCase )
        lowerCamelCase_ = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(config=UpperCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,
                        [0.0, 1.0] ,
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,
                    )

    @unittest.skip(reason="UperNet does not have tied weights" )
    def snake_case ( self ):
        pass

    @slow
    def snake_case ( self ):
        """Loads the first published checkpoint to check from_pretrained works."""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )


def __snake_case ( ):
    """Downloads and returns the ADE20k fixture image (presumably `prepare_img`,
    referenced under that name below)."""
    lowerCamelCase_ = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    lowerCamelCase_ = Image.open(UpperCAmelCase_ ).convert("RGB" )
    return image


@require_torch
@require_vision
@slow
class snake_case ( unittest.TestCase ):
    """Integration tests against pretrained UperNet checkpoints (slow)."""

    def snake_case ( self ):
        """Swin-tiny checkpoint: checks logits shape and a 3x3 logits slice."""
        lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
        lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(UpperCamelCase )
        lowerCamelCase_ = prepare_img()
        lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )

    def snake_case ( self ):
        """ConvNext-tiny checkpoint: same checks with its expected slice."""
        lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
        lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(UpperCamelCase )
        lowerCamelCase_ = prepare_img()
        lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
675
1
'''Convert S3PRL UniSpeechSat downstream checkpoints to Transformers format.

NOTE(review): obfuscated source -- assignment targets were rewritten to
`lowerCamelCase_`/`a_` while the reads (`hf_config`, `downstream_dict`,
`model`, `parser`, `args`, ...) keep the original names; in the converter
functions the mangled targets were presumably `model.<submodule>.weight.data
= ...` style tensor copies.  Cannot run as written; restore before use.
'''
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    WavaVecaFeatureExtractor,
    logging,
)


logging.set_verbosity_info()
a_ : Dict = logging.get_logger(__name__)


def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ):
    """Copy sequence-classification head weights (projector + post-net linear)
    into a pretrained UniSpeechSatForSequenceClassification (presumably
    `convert_classification`, referenced under that name below)."""
    lowerCamelCase_ = UniSpeechSatForSequenceClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
    lowerCamelCase_ = downstream_dict["projector.weight"]
    lowerCamelCase_ = downstream_dict["projector.bias"]
    lowerCamelCase_ = downstream_dict["model.post_net.linear.weight"]
    lowerCamelCase_ = downstream_dict["model.post_net.linear.bias"]
    return model


def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ):
    """Copy frame-classification (diarization) head weights (presumably
    `convert_diarization`)."""
    lowerCamelCase_ = UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
    lowerCamelCase_ = downstream_dict["model.linear.weight"]
    lowerCamelCase_ = downstream_dict["model.linear.bias"]
    return model


def __snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
    """Copy x-vector (speaker verification) head weights, including the TDNN
    kernels per layer (presumably `convert_xvector`)."""
    lowerCamelCase_ = UniSpeechSatForXVector.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
    lowerCamelCase_ = downstream_dict["connector.weight"]
    lowerCamelCase_ = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        lowerCamelCase_ = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        lowerCamelCase_ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    lowerCamelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    lowerCamelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    lowerCamelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    lowerCamelCase_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    lowerCamelCase_ = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] ):
    """Entry point (presumably `convert_saprl_checkpoint`, called in __main__):
    load the s3prl checkpoint, dispatch on the config's architecture name, and
    save the converted model + feature extractor."""
    lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location="cpu" )
    lowerCamelCase_ = checkpoint["Downstream"]
    lowerCamelCase_ = UniSpeechSatConfig.from_pretrained(UpperCAmelCase_ )
    lowerCamelCase_ = WavaVecaFeatureExtractor.from_pretrained(
        UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ )
    lowerCamelCase_ = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        lowerCamelCase_ = convert_classification(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    elif arch.endswith("ForAudioFrameClassification" ):
        lowerCamelCase_ = convert_diarization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    elif arch.endswith("ForXVector" ):
        lowerCamelCase_ = convert_xvector(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
    # Weighted-layer-sum models additionally carry featurizer weights.
    if hf_config.use_weighted_layer_sum:
        lowerCamelCase_ = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(UpperCAmelCase_ )
    hf_model.save_pretrained(UpperCAmelCase_ )


if __name__ == "__main__":
    a_ : int = argparse.ArgumentParser()
    parser.add_argument(
        """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
    )
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
    parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    a_ : Optional[int] = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
675
'''simple docstring''' from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration a_ : Optional[int] = HfArgumentParser(InitializationArguments) a_ : str = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization a_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks a_ : str = { """vocab_size""": len(tokenizer), """scale_attn_by_inverse_layer_idx""": True, """reorder_and_upcast_attn""": True, } # Load model config (GPT-2 large in this case) a_ : Optional[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config a_ : Optional[Any] = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
675
1
'''Deformable DETR model configuration.

NOTE(review): obfuscated -- `class snake_case ( lowercase )` was presumably
`DeformableDetrConfig(PretrainedConfig)`; every `__init__` parameter was
renamed to `UpperCamelCase` (original names survive in the body's reads); the
repeated `_lowerCamelCase` class attributes shadow each other (presumably
`model_type` and `attribute_map`); every `lowerCamelCase_ = x` was presumably
`self.x = x`.  Cannot run as written; restore from upstream before use.
'''
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


a_ : Optional[int] = logging.get_logger(__name__)

a_ : Dict = {
    """SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class snake_case ( lowercase ):
    """Configuration holding all Deformable DETR hyper-parameters."""

    _lowerCamelCase = "deformable_detr"
    _lowerCamelCase = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=3 , UpperCamelCase=300 , UpperCamelCase=1024 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=256 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1.0 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="sine" , UpperCamelCase="resnet50" , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=False , UpperCamelCase=300 , UpperCamelCase=False , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=0.1 , UpperCamelCase=0.25 , UpperCamelCase=False , **UpperCamelCase , ):
        """Validates the backbone choice and stores all hyper-parameters."""
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                lowerCamelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(UpperCamelCase , UpperCamelCase ):
                # presumably `isinstance(backbone_config, dict)`: rebuild the
                # nested config object from its serialized dict form
                lowerCamelCase_ = backbone_config.get("model_type" )
                lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type]
                lowerCamelCase_ = config_class.from_dict(UpperCamelCase )
        # presumably `self.<name> = <name>` for each of the following:
        lowerCamelCase_ = use_timm_backbone
        lowerCamelCase_ = backbone_config
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = num_queries
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = d_model
        lowerCamelCase_ = encoder_ffn_dim
        lowerCamelCase_ = encoder_layers
        lowerCamelCase_ = encoder_attention_heads
        lowerCamelCase_ = decoder_ffn_dim
        lowerCamelCase_ = decoder_layers
        lowerCamelCase_ = decoder_attention_heads
        lowerCamelCase_ = dropout
        lowerCamelCase_ = attention_dropout
        lowerCamelCase_ = activation_dropout
        lowerCamelCase_ = activation_function
        lowerCamelCase_ = init_std
        lowerCamelCase_ = init_xavier_std
        lowerCamelCase_ = encoder_layerdrop
        lowerCamelCase_ = auxiliary_loss
        lowerCamelCase_ = position_embedding_type
        lowerCamelCase_ = backbone
        lowerCamelCase_ = use_pretrained_backbone
        lowerCamelCase_ = dilation
        # deformable attributes
        lowerCamelCase_ = num_feature_levels
        lowerCamelCase_ = encoder_n_points
        lowerCamelCase_ = decoder_n_points
        lowerCamelCase_ = two_stage
        lowerCamelCase_ = two_stage_num_proposals
        lowerCamelCase_ = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        lowerCamelCase_ = class_cost
        lowerCamelCase_ = bbox_cost
        lowerCamelCase_ = giou_cost
        # Loss coefficients
        lowerCamelCase_ = mask_loss_coefficient
        lowerCamelCase_ = dice_loss_coefficient
        lowerCamelCase_ = bbox_loss_coefficient
        lowerCamelCase_ = giou_loss_coefficient
        lowerCamelCase_ = eos_coefficient
        lowerCamelCase_ = focal_alpha
        lowerCamelCase_ = disable_custom_kernels
        super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )

    @property
    def snake_case ( self ):
        """presumably `num_attention_heads` (see `attribute_map` above)"""
        return self.encoder_attention_heads

    @property
    def snake_case ( self ):
        """presumably `hidden_size` (see `attribute_map` above)"""
        return self.d_model

    def snake_case ( self ):
        """Serialize to a dict (presumably `to_dict`), flattening the nested
        backbone config and adding `model_type`."""
        lowerCamelCase_ = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            lowerCamelCase_ = self.backbone_config.to_dict()
        lowerCamelCase_ = self.__class__.model_type
        return output
675
'''Mock download manager that serves local "dummy data" fixtures for dataset
tests instead of performing real downloads.

NOTE(review): obfuscated -- every method is named `snake_case` (so only the
last definition under each name survives), the repeated `_lowerCamelCase`
class attributes shadow each other, and `lowerCamelCase_ = ...` targets were
presumably `self.<attr> = ...` / locals named as in the reads.  Cannot run as
written; restore from upstream before use.
'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


a_ : List[Any] = get_logger(__name__)


class snake_case :
    """Resolves dataset URLs to paths inside a local/downloaded dummy_data.zip."""

    _lowerCamelCase = "dummy_data"
    _lowerCamelCase = "datasets"
    _lowerCamelCase = False

    def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ):
        """Stores dataset name, cache dir, config and dummy-data options."""
        lowerCamelCase_ = 0
        lowerCamelCase_ = dataset_name
        lowerCamelCase_ = cache_dir
        lowerCamelCase_ = use_local_dummy_data
        lowerCamelCase_ = config
        # download_callbacks take a single url as input
        lowerCamelCase_ = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowerCamelCase_ = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowerCamelCase_ = str(UpperCamelCase )
        # to be downloaded
        lowerCamelCase_ = None
        lowerCamelCase_ = None

    @property
    def snake_case ( self ):
        """Lazily downloads/caches the dummy file on first access."""
        if self._dummy_file is None:
            lowerCamelCase_ = self.download_dummy_data()
        return self._dummy_file

    @property
    def snake_case ( self ):
        """Folder layout differs depending on whether a config is set."""
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("dummy" , self.version_name )

    @property
    def snake_case ( self ):
        """Path of the dummy_data.zip inside the dummy folder."""
        return os.path.join(self.dummy_data_folder , "dummy_data.zip" )

    def snake_case ( self ):
        """Fetches (local or GitHub) and extracts the dummy zip, returning its path."""
        lowerCamelCase_ = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowerCamelCase_ = cached_path(
            UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase )
        return os.path.join(UpperCamelCase , self.dummy_file_name )

    @property
    def snake_case ( self ):
        """Local repo path to the dummy zip."""
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def snake_case ( self ):
        """GitHub URL of the dummy zip (cached after first build)."""
        if self._bucket_url is None:
            lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
        return self._bucket_url

    @property
    def snake_case ( self ):
        """Directory name of the extracted dummy data."""
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )

    def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
        """download_and_extract stand-in: maps url(s) to dummy-data paths by type."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowerCamelCase_ = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowerCamelCase_ = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(UpperCamelCase , UpperCamelCase ):
            return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase )
        elif isinstance(UpperCamelCase , (list, tuple) ):
            return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase )
        else:
            return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase )

    def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
        """download() stand-in: delegates to download_and_extract."""
        return self.download_and_extract(UpperCamelCase )

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """download_custom() stand-in: delegates to download_and_extract."""
        return self.download_and_extract(UpperCamelCase )

    def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ):
        """extract() stand-in: dummy files are already extracted."""
        return path

    def snake_case ( self ):
        """get_recorded_sizes_checksums() stand-in: nothing recorded."""
        return {}

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Maps a {key: url(s)} dict to dummy paths, keeping names unique."""
        lowerCamelCase_ = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(UpperCamelCase , UpperCamelCase ):
                    for single_url in single_urls:
                        download_callback(UpperCamelCase )
                else:
                    lowerCamelCase_ = single_urls
                    download_callback(UpperCamelCase )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(UpperCamelCase , UpperCamelCase ):
                lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls]
            else:
                lowerCamelCase_ = single_urls
                lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) )
            lowerCamelCase_ = value
        # make sure that values are unique
        if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Maps a list of urls to dummy paths, collapsing sharded/pubmed urls."""
        lowerCamelCase_ = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url )
        lowerCamelCase_ = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(UpperCamelCase )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
            dummy_data_list.append(UpperCamelCase )
        return dummy_data_list

    def snake_case ( self , UpperCamelCase , UpperCamelCase ):
        """Maps a single url to its dummy path (with a legacy fallback)."""
        for download_callback in self.download_callbacks:
            download_callback(UpperCamelCase )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
        if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def snake_case ( self ):
        """No-op stand-in (presumably delete_extracted_files)."""
        pass

    def snake_case ( self ):
        """No-op stand-in (presumably manage_extracted_files)."""
        pass

    def snake_case ( self , UpperCamelCase ):
        """iter_archive() stand-in: yields (relative-posix-path, file handle)."""

        def _iter_archive_members(UpperCamelCase ):
            # this preserves the order of the members inside the ZIP archive
            lowerCamelCase_ = Path(self.dummy_file ).parent
            lowerCamelCase_ = path.relative_to(UpperCamelCase )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowerCamelCase_ = zip_file.namelist()
                for member in members:
                    if member.startswith(relative_path.as_posix() ):
                        yield dummy_parent_path.joinpath(UpperCamelCase )

        lowerCamelCase_ = Path(UpperCamelCase )
        lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__") ):
                yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" )

    def snake_case ( self , UpperCamelCase ):
        """iter_files() stand-in: walks path(s), skipping hidden/dunder entries."""
        if not isinstance(UpperCamelCase , UpperCamelCase ):
            lowerCamelCase_ = [paths]
        for path in paths:
            if os.path.isfile(UpperCamelCase ):
                if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(UpperCamelCase ):
                    if os.path.basename(UpperCamelCase ).startswith((".", "__") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(UpperCamelCase ):
                        if filename.startswith((".", "__") ):
                            continue
                        yield os.path.join(UpperCamelCase , UpperCamelCase )
675
1
'''Lazy import structure for the LayoutLMv2 model package.

NOTE(review): obfuscated -- the structure assignments were renamed to `a_`
while `_LazyModule` at the bottom reads `_import_structure` (never assigned),
and the TYPE_CHECKING imports reference mangled names (`LayoutLMvaConfig` for
`LayoutLMv2Config`, module `configuration_layoutlmva`, ...).  As written this
module raises NameError on import; restore names from upstream before use.
'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# presumably `_import_structure = {...}` plus keyed additions below
a_ : Tuple = {
    """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
    """processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
    """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}

# Each optional component is only registered when its backend is importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Dict = ["""LayoutLMv2TokenizerFast"""]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : str = ["""LayoutLMv2FeatureExtractor"""]
    a_ : Union[str, Any] = ["""LayoutLMv2ImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Optional[int] = [
        """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LayoutLMv2ForQuestionAnswering""",
        """LayoutLMv2ForSequenceClassification""",
        """LayoutLMv2ForTokenClassification""",
        """LayoutLMv2Layer""",
        """LayoutLMv2Model""",
        """LayoutLMv2PreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static-analysis-only imports mirroring the lazy structure above.
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

else:
    import sys

    a_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
"""Project Euler 81: minimal path sum through a matrix, moving only right and down."""
import os


def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top-left to the bottom-right of the
    comma-separated matrix stored in *filename* (resolved relative to this
    script's directory), moving only right and down.

    Raises OSError if the matrix file cannot be read.
    """
    # Resolve relative to the script, not the caller's CWD.
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    rows = len(grid)
    cols = len(grid[0])

    # dp[i][j] = cheapest path cost from (0, 0) to (i, j).
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    for j in range(1, cols):  # first row: can only come from the left
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    for i in range(1, rows):  # first column: can only come from above
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
675
1
"""Count negative numbers in a grid sorted in non-increasing order by row and column
(LeetCode 1351), comparing a binary-search solution with two brute-force baselines."""


def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 2000 benchmark grid, non-increasing along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Assert every row and every column of *grid* is sorted in non-increasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Return the index of the first negative value in a non-increasing *array*
    (equivalently, the count of non-negative entries), using binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases: no values, or every value is negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # First negative: this entry is negative and the previous one is not.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers, so the answer is the array length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives via one binary search per row, shrinking the search bound
    row by row (columns are sorted too, so the boundary only moves left)."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives by scanning each row only up to its first negative value."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Time the three implementations against the large benchmark grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
675
"""Integration tests for accelerate's bundled metrics script under several launchers."""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    """Runs accelerate's external-deps metrics script on CPU, one GPU and multi-GPU."""

    def setUp(self):
        # Locate accelerate's bundled test script on disk so it can be handed
        # to ``torchrun`` as a file path in the multi-GPU test.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        # Single process through the debug launcher.
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        # Default (multi-process) debug launcher run.
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
675
1
"""Build a two-qubit circuit, flip both qubits with X gates, and measure them."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Apply an X (NOT) gate to qubits 0 and 1, measure them into classical
    bits 0 and 1, and return the counts histogram of 1000 simulator shots.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a quantum circuit acting on the q register.
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) gate to qubits 0 & 1.
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits.
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator.
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
675
"""Tests for the seq2seq examples' ROUGE helpers (``calculate_rouge`` /
``calculate_rouge_path``), including the newline-separation behaviour."""
from collections import defaultdict
from pathlib import Path

import pandas as pd

from rouge_cli import calculate_rouge_path
from utils import calculate_rouge


# Model predictions (CNN/DailyMail-style summaries).
PRED = [
    """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
    """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
    """ depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
    """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
    """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
    """ founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
    """ body.""",
    """Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
    """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
    """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
    """ punishment.""",
]

# Reference summaries aligned with PRED.
TGT = [
    """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
    """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
    """ had informed his Lufthansa training school of an episode of severe depression, airline says .""",
    """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
    """ Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
    """ Israelis .""",
    """Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
    """ death . Organization claims that governments around the world are using the threat of terrorism to advance"""
    """ executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
    """ sentences up by 28% .""",
]


def test_disaggregated_scores_are_determinstic():
    """Per-example (non-aggregated) scores should not depend on which keys were requested."""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    """rougeLsum should improve when sentences are newline-separated."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    """rouge1/2/L are sentence-split agnostic."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    """Pegasus-style ``<n>`` predictions should score higher with newline handling on."""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
675
1
"""Generate a markdown index of all .py/.ipynb files under a directory tree."""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield paths (relative to *top_dir*) of .py/.ipynb files, skipping
    ``scripts`` directories, hidden/underscore directories and ``__init__.py``.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place: os.walk only honours modifications to the same list object.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """Return the markdown list prefix for nesting depth *i* (0 = a ## heading)."""
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print headings/list items for each path component that changed between
    *old_path* and *new_path*; return *new_path* as the new current path."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown directory listing with one link per source file."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        # Link target keeps the real filename; spaces must be URL-encoded.
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
675
"""Convert original EnCodec checkpoints to the Hugging Face `EncodecModel` format."""
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)

# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

# Original-checkpoint key -> HF key maps. `*` stands for a layer index and is
# substituted in `recursively_load_weights`.
MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk dotted *key* down from *hf_pointer*, shape-check and copy *value*
    into the parameter selected by *weight_type* (or the pointer itself)."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    # Any non-None weight_type already passed the getattr above, so a single
    # guarded getattr replaces the original 16-branch elif chain.
    if weight_type is None:
        hf_pointer.data = value
    else:
        getattr(hf_pointer, weight_type).data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    """Return True if *name* matches any pattern in *ignore_keys*
    (supports a trailing ``.*`` prefix match and an infix ``.*.`` wildcard)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    """Copy every tensor of the original state dict into *hf_model* using the
    key maps above; unmatched keys are collected and logged."""
    unused_weights = []

    # Bug fix: the original test was `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy and silently accepted unsupported model names.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Load an original EnCodec checkpoint, port its weights into an
    `EncodecModel`, save model + feature extractor, and optionally push to hub."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
675
1
"""Image processor for LayoutLMv2: resize/rescale/normalize plus optional Tesseract OCR."""
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends

if is_vision_available():
    import PIL  # soft dependency

if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """Scale an absolute-pixel box to LayoutLMv2's 0-1000 coordinate system."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on *image*; return (words, boxes normalized to 0-1000)."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set for O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Constructs a LayoutLMv2 image processor: optionally resizes, rescales
    and normalizes images, and optionally applies Tesseract OCR to obtain
    words and normalized bounding boxes."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_value=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        apply_ocr=True,
        ocr_lang=None,
        tesseract_config="",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize *image* to the (height, width) given by *size*."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize *image* with the given per-channel *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        apply_ocr=None,
        ocr_lang=None,
        tesseract_config=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images; per-call arguments override the
        instance defaults. Returns a `BatchFeature` with ``pixel_values`` and,
        when OCR is applied, ``words`` and ``boxes``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
675
"""Dataset reader that materializes a pyspark DataFrame via the Spark builder."""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class snake_case ( lowercase ):
    """Reader wrapping the ``Spark`` packaged-module builder.

    NOTE(review): the signature repeats the parameter name ``UpperCamelCase``
    (invalid in real Python) and the ``__init__`` body binds values to a local
    ``lowerCamelCase_`` rather than ``self.<attr>`` — obfuscated source; code
    left byte-identical.
    """

    def __init__( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = "arrow" , **UpperCamelCase , ):
        """Forward split/features/cache options to the base reader and build a
        Spark builder around the incoming DataFrame."""
        super().__init__(
            split=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , streaming=UpperCamelCase , **UpperCamelCase , )
        lowerCamelCase_ = load_from_cache_file
        lowerCamelCase_ = file_format
        lowerCamelCase_ = Spark(
            df=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , working_dir=UpperCamelCase , **UpperCamelCase , )

    def snake_case ( self ):
        """Return the dataset split; streams lazily when streaming is enabled,
        otherwise downloads/prepares first (forcing a re-download when the
        cache should be bypassed)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        lowerCamelCase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=UpperCamelCase , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
675
1
"""UMT5 model configuration and its ONNX export configuration."""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging

a_ : Optional[int] = logging.get_logger(__name__)

# map of pretrained checkpoint name -> hosted config URL
a_ : str = {
    """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class snake_case ( lowercase ):
    """Configuration class for UMT5 (T5-family encoder-decoder).

    NOTE(review): the ``__init__`` signature repeats ``UpperCamelCase`` (a
    duplicate-argument syntax error) and assigns to a throwaway local
    ``lowerCamelCase_`` instead of ``self.<attr>`` — obfuscated source; code
    left byte-identical.
    """

    _lowerCamelCase = "umt5"
    _lowerCamelCase = ["past_key_values"]

    def __init__( self , UpperCamelCase=25_0112 , UpperCamelCase=512 , UpperCamelCase=64 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=None , UpperCamelCase=6 , UpperCamelCase=32 , UpperCamelCase=128 , UpperCamelCase=0.1 , UpperCamelCase=1e-6 , UpperCamelCase=1.0 , UpperCamelCase="gated-gelu" , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="T5Tokenizer" , UpperCamelCase=True , UpperCamelCase=0 , UpperCamelCase=1 , UpperCamelCase=0 , **UpperCamelCase , ):
        """Store model hyperparameters (vocab/hidden sizes, layer/head counts,
        relative attention settings, dropout, activation) and validate the
        ``feed_forward_proj`` activation spec."""
        super().__init__(
            is_encoder_decoder=UpperCamelCase , tokenizer_class=UpperCamelCase , tie_word_embeddings=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , **UpperCamelCase , )
        lowerCamelCase_ = vocab_size
        lowerCamelCase_ = d_model
        lowerCamelCase_ = d_kv
        lowerCamelCase_ = d_ff
        lowerCamelCase_ = num_layers
        # decoder defaults to the same depth as the encoder (symmetry)
        lowerCamelCase_ = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )
        lowerCamelCase_ = num_heads
        lowerCamelCase_ = relative_attention_num_buckets
        lowerCamelCase_ = relative_attention_max_distance
        lowerCamelCase_ = dropout_rate
        lowerCamelCase_ = layer_norm_epsilon
        lowerCamelCase_ = initializer_factor
        lowerCamelCase_ = feed_forward_proj
        lowerCamelCase_ = use_cache
        # parse the "<gated->ACT_FN" spec, e.g. "gated-gelu" -> gated + gelu
        lowerCamelCase_ = self.feed_forward_proj.split("-" )
        lowerCamelCase_ = act_info[-1]
        lowerCamelCase_ = act_info[0] == "gated"
        if len(UpperCamelCase ) > 1 and act_info[0] != "gated" or len(UpperCamelCase ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # historical alias: the gated variant uses the "new" gelu implementation
        if feed_forward_proj == "gated-gelu":
            lowerCamelCase_ = "gelu_new"

    @property
    def snake_case ( self ):
        """Alias for ``d_model`` (common-config ``hidden_size``)."""
        return self.d_model

    @property
    def snake_case ( self ):
        """Alias for ``num_heads`` (common-config ``num_attention_heads``)."""
        return self.num_heads

    @property
    def snake_case ( self ):
        """Alias for ``num_layers`` (common-config ``num_hidden_layers``)."""
        return self.num_layers


class snake_case ( lowercase ):
    """ONNX export configuration for UMT5 (seq2seq with optional past)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def snake_case ( self ):
        """Declare dynamic axes for encoder/decoder inputs; when ``use_past``
        is set, decoder inputs account for cached key/values."""
        lowerCamelCase_ = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            lowerCamelCase_ = "past_encoder_sequence + sequence"
            lowerCamelCase_ = {0: "batch"}
            lowerCamelCase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            lowerCamelCase_ = {0: "batch", 1: "decoder_sequence"}
            lowerCamelCase_ = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(UpperCamelCase , direction="inputs" )
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def snake_case ( self ):
        """Minimum ONNX opset required for export."""
        return 13

    @property
    def snake_case ( self ):
        """Absolute tolerance used when validating exported model outputs."""
        return 5e-4
675
"""Project Euler problem 19: counting Sundays on the first of the month."""


def __snake_case ( ):
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000).

    Returns:
        int: the number of first-of-month Sundays (171).
    """
    # Use standard-library calendar arithmetic instead of the previous
    # hand-rolled day/month/leap-year bookkeeping, which is easy to get
    # subtly wrong (manual 29-day February handling, off-by-one month math).
    from datetime import date

    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        # weekday(): Monday == 0 ... Sunday == 6
        if date(year, month, 1).weekday() == 6
    )


if __name__ == "__main__":
    # BUG FIX: the guard previously called `solution()`, a name that is not
    # defined anywhere in this module (the function above is `__snake_case`),
    # so running the script raised NameError. Call the defined function.
    print(__snake_case())
675
1
"""Lazy-import module for WavLM: exposes config/model classes without paying
the torch import cost until an attribute is actually accessed."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# names importable without any optional backend
a_ : str = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}

# modeling classes are only registered when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        """WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """WavLMForAudioFrameClassification""",
        """WavLMForCTC""",
        """WavLMForSequenceClassification""",
        """WavLMForXVector""",
        """WavLMModel""",
        """WavLMPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # static type checkers see the real imports
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # at runtime, replace this module with a lazy proxy that imports on access
    a_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
"""Deformable DETR model configuration."""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

a_ : Optional[int] = logging.get_logger(__name__)

# map of pretrained checkpoint name -> hosted config URL
a_ : Dict = {
    """SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class snake_case ( lowercase ):
    """Configuration for Deformable DETR (backbone + deformable-attention
    encoder/decoder + Hungarian-matcher loss coefficients).

    NOTE(review): the ``__init__`` signature repeats ``UpperCamelCase`` (a
    duplicate-argument syntax error) and binds values to a throwaway local
    ``lowerCamelCase_`` instead of ``self.<attr>`` — obfuscated source; code
    left byte-identical.
    """

    _lowerCamelCase = "deformable_detr"
    # common-config aliases resolved via attribute_map
    _lowerCamelCase = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=3 , UpperCamelCase=300 , UpperCamelCase=1024 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=6 , UpperCamelCase=1024 , UpperCamelCase=8 , UpperCamelCase=0.0 , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=256 , UpperCamelCase=0.1 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=1.0 , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase="sine" , UpperCamelCase="resnet50" , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=4 , UpperCamelCase=False , UpperCamelCase=300 , UpperCamelCase=False , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=1 , UpperCamelCase=1 , UpperCamelCase=5 , UpperCamelCase=2 , UpperCamelCase=0.1 , UpperCamelCase=0.25 , UpperCamelCase=False , **UpperCamelCase , ):
        """Validate backbone choice, resolve a backbone config when not using a
        timm backbone, then store transformer/deformable-attention and loss
        hyperparameters."""
        # exactly one backbone source may be used: an HF backbone_config or timm
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                # default to a ResNet exposing its last stage as feature map
                lowerCamelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(UpperCamelCase , UpperCamelCase ):
                # a plain dict: rebuild the typed config class from it
                lowerCamelCase_ = backbone_config.get("model_type" )
                lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type]
                lowerCamelCase_ = config_class.from_dict(UpperCamelCase )
        lowerCamelCase_ = use_timm_backbone
        lowerCamelCase_ = backbone_config
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = num_queries
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = d_model
        lowerCamelCase_ = encoder_ffn_dim
        lowerCamelCase_ = encoder_layers
        lowerCamelCase_ = encoder_attention_heads
        lowerCamelCase_ = decoder_ffn_dim
        lowerCamelCase_ = decoder_layers
        lowerCamelCase_ = decoder_attention_heads
        lowerCamelCase_ = dropout
        lowerCamelCase_ = attention_dropout
        lowerCamelCase_ = activation_dropout
        lowerCamelCase_ = activation_function
        lowerCamelCase_ = init_std
        lowerCamelCase_ = init_xavier_std
        lowerCamelCase_ = encoder_layerdrop
        lowerCamelCase_ = auxiliary_loss
        lowerCamelCase_ = position_embedding_type
        lowerCamelCase_ = backbone
        lowerCamelCase_ = use_pretrained_backbone
        lowerCamelCase_ = dilation
        # deformable attributes
        lowerCamelCase_ = num_feature_levels
        lowerCamelCase_ = encoder_n_points
        lowerCamelCase_ = decoder_n_points
        lowerCamelCase_ = two_stage
        lowerCamelCase_ = two_stage_num_proposals
        lowerCamelCase_ = with_box_refine
        # two-stage variant requires iterative box refinement
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        lowerCamelCase_ = class_cost
        lowerCamelCase_ = bbox_cost
        lowerCamelCase_ = giou_cost
        # Loss coefficients
        lowerCamelCase_ = mask_loss_coefficient
        lowerCamelCase_ = dice_loss_coefficient
        lowerCamelCase_ = bbox_loss_coefficient
        lowerCamelCase_ = giou_loss_coefficient
        lowerCamelCase_ = eos_coefficient
        lowerCamelCase_ = focal_alpha
        lowerCamelCase_ = disable_custom_kernels
        super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )

    @property
    def snake_case ( self ):
        """Alias for ``encoder_attention_heads`` (common-config name)."""
        return self.encoder_attention_heads

    @property
    def snake_case ( self ):
        """Alias for ``d_model`` (common-config ``hidden_size``)."""
        return self.d_model

    def snake_case ( self ):
        """Serialize this config (and any nested backbone config) to a dict."""
        lowerCamelCase_ = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            lowerCamelCase_ = self.backbone_config.to_dict()
        lowerCamelCase_ = self.__class__.model_type
        return output
675
1
"""Bilateral filtering of a grayscale image (spatial + intensity Gaussians).

NOTE(review): every function signature below repeats the parameter name
``UpperCAmelCase_`` (a duplicate-argument syntax error in real Python) and
results are bound to a throwaway local ``lowerCamelCase_`` — mechanically
obfuscated source; code left byte-identical, only documentation added.
"""
import math
import sys
import cva
import numpy as np


def __snake_case ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float ):
    """Element-wise Gaussian of a matrix for the given variance.

    The second argument is treated as a variance; its square root is used as
    sigma in the Gaussian density.
    """
    lowerCamelCase_ = math.sqrt(UpperCAmelCase_ )
    lowerCamelCase_ = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )


def __snake_case ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
    """Return the square kernel-sized window of the image centered at (x, y)."""
    lowerCamelCase_ = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : float ):
    """Build a spatial Gaussian kernel of the given size and variance.

    Each cell holds the Euclidean distance from the kernel center; the
    Gaussian is then applied over that distance matrix.
    """
    lowerCamelCase_ = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , UpperCAmelCase_ ):
        for j in range(0 , UpperCAmelCase_ ):
            lowerCamelCase_ = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(UpperCAmelCase_ , UpperCAmelCase_ )


def __snake_case ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : int , ):
    """Apply a bilateral filter: for every interior pixel, weight the local
    window by the product of the spatial Gaussian and an intensity Gaussian
    of differences from the center pixel, then take the normalized sum."""
    lowerCamelCase_ = np.zeros(img.shape )
    lowerCamelCase_ = get_gauss_kernel(UpperCAmelCase_ , UpperCAmelCase_ )
    lowerCamelCase_ ,lowerCamelCase_ = img.shape
    # border pixels (within kernel_size // 2 of an edge) are left at zero
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            lowerCamelCase_ = get_slice(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCamelCase_ = img_s - img_s[kernel_size // 2, kernel_size // 2]
            lowerCamelCase_ = vec_gaussian(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCamelCase_ = np.multiply(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCamelCase_ = np.multiply(UpperCAmelCase_ , UpperCAmelCase_ )
            lowerCamelCase_ = np.sum(UpperCAmelCase_ ) / np.sum(UpperCAmelCase_ )
            lowerCamelCase_ = val
    return imga


def __snake_case ( UpperCAmelCase_ : list ):
    """Parse CLI args: [filename, spatial_variance, intensity_variance,
    kernel_size]; each falls back to a default when omitted. Kernel size is
    forced odd so the window has a well-defined center."""
    lowerCamelCase_ = args[1] if args[1:] else "../image_data/lena.jpg"
    lowerCamelCase_ = float(args[2] ) if args[2:] else 1.0
    lowerCamelCase_ = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        lowerCamelCase_ = int(args[4] )
        lowerCamelCase_ = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        lowerCamelCase_ = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    # load as grayscale, filter in [0, 1] float space, then display as uint8
    a_ , a_ , a_ , a_ : Tuple = parse_args(sys.argv)
    a_ : Dict = cva.imread(filename, 0)
    cva.imshow("""input image""", img)
    a_ : Any = img / 255
    a_ : Tuple = out.astype("""float32""")
    a_ : int = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    a_ : Tuple = out * 255
    a_ : Tuple = np.uinta(out)
    cva.imshow("""output image""", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
675
"""Convert a PyTorch Lightning Longformer-QA checkpoint to a Hugging Face
``LongformerForQuestionAnswering`` save directory."""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel


class snake_case ( pl.LightningModule ):
    """Minimal Lightning wrapper matching the training-time module layout:
    a Longformer backbone plus a 2-way span-prediction head, so the saved
    ``state_dict`` keys line up when loading the checkpoint."""

    def __init__( self , UpperCamelCase ):
        super().__init__()
        lowerCamelCase_ = model
        lowerCamelCase_ = 2  # start/end logits
        lowerCamelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels )

    def snake_case ( self ):
        """No-op: this wrapper exists only to host checkpoint weights."""
        pass


def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
    """Load the Lightning checkpoint into the wrapper, copy its weights into a
    fresh ``LongformerForQuestionAnswering``, and save the result.

    NOTE(review): the signature repeats ``UpperCAmelCase_`` (a duplicate-
    argument syntax error) — obfuscated source; code left byte-identical.
    """
    # load longformer model from model identifier
    lowerCamelCase_ = LongformerModel.from_pretrained(UpperCAmelCase_ )
    lowerCamelCase_ = LightningModel(UpperCAmelCase_ )
    lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    lowerCamelCase_ = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase_ )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(UpperCAmelCase_ )
    print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )


if __name__ == "__main__":
    a_ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--longformer_model""",
        default=None,
        type=str,
        required=True,
        help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
    )
    parser.add_argument(
        """--longformer_question_answering_ckpt_path""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch Lightning Checkpoint.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    a_ : Tuple = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
675
1
"""Spark-backed dataset reader (pyspark DataFrame -> `datasets` Dataset)."""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class snake_case ( lowercase ):
    """Dataset reader built on the ``Spark`` packaged-module builder.

    NOTE(review): the ``__init__`` signature repeats ``UpperCamelCase``
    (invalid in real Python) and stores values into a local
    ``lowerCamelCase_`` rather than ``self.<attr>`` — obfuscated source;
    code left byte-identical.
    """

    def __init__( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = True , UpperCamelCase = "arrow" , **UpperCamelCase , ):
        """Initialize the base reader with split/features/cache options and
        construct the Spark builder from the DataFrame."""
        super().__init__(
            split=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , streaming=UpperCamelCase , **UpperCamelCase , )
        lowerCamelCase_ = load_from_cache_file
        lowerCamelCase_ = file_format
        lowerCamelCase_ = Spark(
            df=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , working_dir=UpperCamelCase , **UpperCamelCase , )

    def snake_case ( self ):
        """Return the requested split, streaming when enabled; otherwise run
        download_and_prepare first (bypassing the cache when configured)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        lowerCamelCase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=UpperCamelCase , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
675
"""Lazy-import module for CTRL: registers config/tokenizer unconditionally
and the PyTorch/TensorFlow model classes only when their backend is present."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# names importable without any optional backend
a_ : Optional[Any] = {
    """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
    """tokenization_ctrl""": ["""CTRLTokenizer"""],
}

# PyTorch model classes, only when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : str = [
        """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CTRLForSequenceClassification""",
        """CTRLLMHeadModel""",
        """CTRLModel""",
        """CTRLPreTrainedModel""",
    ]

# TensorFlow model classes, only when TF is installed
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCTRLForSequenceClassification""",
        """TFCTRLLMHeadModel""",
        """TFCTRLModel""",
        """TFCTRLPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # static type checkers resolve the real imports
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys

    # at runtime the module is swapped for a lazy proxy that imports on access
    a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
675
1
"""Unit tests for BioGPT (model, causal LM, sequence/token classification).

NOTE(review): as elsewhere in this file, method signatures repeat the
parameter name ``UpperCamelCase`` (a duplicate-argument syntax error in real
Python) and intermediate values are bound to a throwaway local
``lowerCamelCase_`` — mechanically obfuscated source; code left byte-identical,
only documentation added.  The final integration-test class is cut off at the
end of this chunk and continues beyond it.
"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class snake_case :
    """Test helper that builds small BioGPT configs/inputs and runs the
    individual model checks invoked by the test case below."""

    def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ):
        """Record the tiny-model hyperparameters used by every check."""
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = seq_length
        lowerCamelCase_ = is_training
        lowerCamelCase_ = use_input_mask
        lowerCamelCase_ = use_token_type_ids
        lowerCamelCase_ = use_labels
        lowerCamelCase_ = vocab_size
        lowerCamelCase_ = hidden_size
        lowerCamelCase_ = num_hidden_layers
        lowerCamelCase_ = num_attention_heads
        lowerCamelCase_ = intermediate_size
        lowerCamelCase_ = hidden_act
        lowerCamelCase_ = hidden_dropout_prob
        lowerCamelCase_ = attention_probs_dropout_prob
        lowerCamelCase_ = max_position_embeddings
        lowerCamelCase_ = type_vocab_size
        lowerCamelCase_ = type_sequence_label_size
        lowerCamelCase_ = initializer_range
        lowerCamelCase_ = num_labels
        lowerCamelCase_ = num_choices
        lowerCamelCase_ = scope

    def snake_case ( self ):
        """Build random ids/masks/labels plus a config for one test run."""
        lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ = None
        if self.use_input_mask:
            lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ = None
        if self.use_token_type_ids:
            lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        if self.use_labels:
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case ( self ):
        """Return a tiny decoder-style BioGptConfig for the checks."""
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Forward the base model (with and without mask) and check the
        hidden-state output shape."""
        lowerCamelCase_ = BioGptModel(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase )
        lowerCamelCase_ = model(UpperCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
        """Forward the causal-LM head with labels and check the logits shape."""
        lowerCamelCase_ = BioGptForCausalLM(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase ):
        """Check that cached (past_key_values) decoding matches a full forward
        pass when the attention mask hides a mutated token."""
        lowerCamelCase_ = BioGptModel(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        # create attention mask
        lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase )
        lowerCamelCase_ = self.seq_length // 2
        lowerCamelCase_ = 0
        # first forward pass
        lowerCamelCase_ ,lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        lowerCamelCase_ = ids_tensor((1,) , UpperCamelCase ).item() + 1
        lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        lowerCamelCase_ = random_other_next_tokens
        # append to next input_ids and attn_mask
        lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase_ = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCamelCase )] , dim=1 , )
        # get two different outputs
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase )["last_hidden_state"]
        lowerCamelCase_ = model(UpperCamelCase , past_key_values=UpperCamelCase , attention_mask=UpperCamelCase )["last_hidden_state"]
        # select random slice
        lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
        lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase ):
        """Check cached decoding with a multi-token continuation against the
        equivalent uncached forward pass."""
        lowerCamelCase_ = BioGptModel(config=UpperCamelCase ).to(UpperCamelCase ).eval()
        lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase )
        # first forward pass
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , use_cache=UpperCamelCase )
        lowerCamelCase_ ,lowerCamelCase_ = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase )["last_hidden_state"]
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , past_key_values=UpperCamelCase )[
            "last_hidden_state"
        ]
        # select random slice
        lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase , UpperCamelCase=False ):
        """Run a forward + backward pass (optionally with gradient
        checkpointing) and check the loss/logits shapes."""
        lowerCamelCase_ = BioGptForCausalLM(UpperCamelCase )
        model.to(UpperCamelCase )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()

    def snake_case ( self , UpperCamelCase , *UpperCamelCase ):
        """Verify the scaled initialization of `c_proj` weights (std shrunk by
        sqrt(2 * num_layers), mean near zero)."""
        lowerCamelCase_ = BioGptModel(UpperCamelCase )
        lowerCamelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , *UpperCamelCase ):
        """Forward the token-classification head and check the logits shape."""
        lowerCamelCase_ = self.num_labels
        lowerCamelCase_ = BioGptForTokenClassification(UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def snake_case ( self ):
        """Return (config, inputs_dict) in the shape the common test mixins expect."""
        lowerCamelCase_ = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,(
                lowerCamelCase_
            ) ,
        ) = config_and_inputs
        lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class snake_case ( lowercase , lowercase , lowercase , unittest.TestCase ):
    """Main BioGPT test case wiring the tester above into the common
    model/generation/pipeline test mixins."""

    _lowerCamelCase = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    _lowerCamelCase = (BioGptForCausalLM,) if is_torch_available() else ()
    _lowerCamelCase = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowerCamelCase = False

    def snake_case ( self ):
        """Create the model tester and a config tester for common checks."""
        lowerCamelCase_ = BioGptModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )

    def snake_case ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def snake_case ( self ):
        """Base-model forward shape check."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase )

    def snake_case ( self ):
        """Repeat the model check for every position-embedding variant."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase_ = type
            self.model_tester.create_and_check_model(*UpperCamelCase )

    def snake_case ( self ):
        """Cached decoding vs. full forward with a mutated masked token."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCamelCase )

    def snake_case ( self ):
        """Forward + backward with gradient checkpointing enabled."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*UpperCamelCase , gradient_checkpointing=UpperCamelCase )

    def snake_case ( self ):
        """Cached decoding with a multi-token continuation."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCamelCase )

    def snake_case ( self ):
        """Scaled c_proj weight-initialization check."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCamelCase )

    def snake_case ( self ):
        """Token-classification head shape check."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCamelCase )

    @slow
    def snake_case ( self ):
        """Integration test: left-padded batched generation matches the
        per-sentence (unpadded) generations for the pretrained checkpoint."""
        lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        model.to(UpperCamelCase )
        lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        lowerCamelCase_ = "left"
        # Define PAD Token = EOS Token = 50256
        lowerCamelCase_ = tokenizer.eos_token
        lowerCamelCase_ = model.config.eos_token_id
        # use different length sentences to test batching
        lowerCamelCase_ = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        lowerCamelCase_ = tokenizer(UpperCamelCase , return_tensors="pt" , padding=UpperCamelCase )
        lowerCamelCase_ = inputs["input_ids"].to(UpperCamelCase )
        lowerCamelCase_ = model.generate(
            input_ids=UpperCamelCase , attention_mask=inputs["attention_mask"].to(UpperCamelCase ) , )
        lowerCamelCase_ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(UpperCamelCase )
        lowerCamelCase_ = model.generate(input_ids=UpperCamelCase )
        lowerCamelCase_ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        lowerCamelCase_ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(UpperCamelCase )
        lowerCamelCase_ = model.generate(input_ids=UpperCamelCase , max_length=model.config.max_length - num_paddings )
        lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
        lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase )
        lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase )
        lowerCamelCase_ = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(UpperCamelCase , UpperCamelCase )
        self.assertListEqual(UpperCamelCase , [non_padded_sentence, padded_sentence] )

    @slow
    def snake_case ( self ):
        """Smoke test: the pretrained checkpoint loads."""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = BioGptModel.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )

    def snake_case ( self ):
        """Sequence-classification head shape check (single-label)."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = 3
        lowerCamelCase_ = input_dict["input_ids"]
        lowerCamelCase_ = input_ids.ne(1 ).to(UpperCamelCase )
        lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        lowerCamelCase_ = BioGptForSequenceClassification(UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def snake_case ( self ):
        """Sequence-classification head shape check (multi-label)."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = 3
        lowerCamelCase_ = "multi_label_classification"
        lowerCamelCase_ = input_dict["input_ids"]
        lowerCamelCase_ = input_ids.ne(1 ).to(UpperCamelCase )
        lowerCamelCase_ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        lowerCamelCase_ = BioGptForSequenceClassification(UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )


@require_torch
class snake_case ( unittest.TestCase ):
    """Integration tests against the pretrained microsoft/biogpt checkpoint.

    NOTE(review): this class is truncated at the end of the visible chunk —
    the final assertion(s) of the method below live past this chunk's edge.
    """

    @slow
    def snake_case ( self ):
        """Check the LM-head logits shape and a fixed slice of values."""
        lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        lowerCamelCase_ = torch.tensor([[2, 4805, 9, 656, 21]] )
        lowerCamelCase_ = model(UpperCamelCase )[0]
        lowerCamelCase_ = 4_2384
        lowerCamelCase_ = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor(
            [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(UpperCamelCase ) torch.manual_seed(0 ) lowerCamelCase_ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(UpperCamelCase ) lowerCamelCase_ = model.generate( **UpperCamelCase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=UpperCamelCase , ) lowerCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase ) lowerCamelCase_ = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(UpperCamelCase , UpperCamelCase )
675
'''Tests for the GPT-SW3 tokenizer (SentencePiece with byte fallback).

NOTE(review): identifiers in this file were mechanically renamed
(``snake_case`` / ``lowerCamelCase_`` / ``UpperCamelCase``). As a result many
references below (``tokenizer``, ``vocab_keys``, ``input_text``, the base
class ``lowercase`` — presumably ``TokenizerTesterMixin`` — and the fixture
path ``a_``) no longer resolve to the values assigned above them, and all test
methods share the single name ``snake_case`` so they shadow one another.
Restore from the upstream ``tests/models/gpt_sw3/test_tokenization_gpt_sw3.py``
before running.
'''
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# Path to a small SentencePiece fixture model that uses byte fallback.
a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")


@require_sentencepiece
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """Slow-tokenizer test suite for ``GPTSwaTokenizer``."""

    # Tester-mixin configuration: class under test and feature flags.
    _lowerCamelCase = GPTSwaTokenizer
    _lowerCamelCase = False
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """Build a tokenizer from the SentencePiece fixture and save it to the tmp dir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )

        tokenizer.save_pretrained(self.tmpdirname )

    def snake_case ( self , UpperCamelCase ):
        """Return an (input, expected output) text pair for round-trip checks."""
        lowerCamelCase_ = "This is a test"
        lowerCamelCase_ = "This is a test"
        return input_text, output_text

    def snake_case ( self ):
        """Token <-> id conversion: "<s>" maps to id 1 and back."""
        lowerCamelCase_ = "<s>"
        lowerCamelCase_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self ):
        """First/second/last vocab entries and total vocab size (2000) of the fixture."""
        lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(UpperCamelCase ) , 2000 )

    def snake_case ( self ):
        """``vocab_size`` property matches the fixture model size (2000)."""
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )

    def snake_case ( self ):
        """Full tokenize/convert round trip, including byte-fallback pieces (<0xC3><0xA9> for 'é')."""
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )
        lowerCamelCase_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [465, 287, 265, 631, 842] )
        lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
        # fmt: on
        lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase )
        self.assertListEqual(
            UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
        # fmt: off
        self.assertListEqual(
            UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def snake_case ( self ):
        """``encode_fast`` matches tokenize+convert_tokens_to_ids; ``decode_fast`` inverts it."""
        lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase )
        lowerCamelCase_ = ["This is a test", "I was born in 92000, and this is falsé."]
        lowerCamelCase_ = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ):
            self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase )

        # Test that decode_fast returns the input text
        for text, token_ids in zip(UpperCamelCase , UpperCamelCase ):
            self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase )

    @slow
    def snake_case ( self ):
        """Integration test against the published ``AI-Sweden/gpt-sw3-126m`` checkpoint."""
        lowerCamelCase_ = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # Pre-computed golden encoding for the five sequences above.
        # fmt: off
        lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCamelCase , )
675
1
'''Tests for the Swin Transformer V2 (Swinv2) PyTorch models.

NOTE(review): identifiers in this file were mechanically renamed
(``snake_case`` / ``lowerCamelCase_`` / ``UpperCamelCase``). Consequences:
multiple parameters in one signature share the name ``UpperCamelCase``
(a duplicate-argument SyntaxError), RHS names such as ``parent`` /
``batch_size`` / ``model`` / ``result`` dangle, the base classes
``lowercase`` (presumably ``ModelTesterMixin`` / ``PipelineTesterMixin``)
are undefined, and all methods share one name so they shadow each other.
Restore from the upstream ``tests/models/swinv2/test_modeling_swinv2.py``
before running; comments below document the intended behaviour only.
'''
import collections
import inspect
import unittest

from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
    from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class snake_case :
    """Builds tiny Swinv2 configs/inputs and runs shape checks for each head."""

    def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=3 , UpperCamelCase=16 , UpperCamelCase=[1, 2, 1] , UpperCamelCase=[2, 2, 4] , UpperCamelCase=2 , UpperCamelCase=2.0 , UpperCamelCase=True , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase="gelu" , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=0.02 , UpperCamelCase=1e-5 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=10 , UpperCamelCase=8 , ):
        """Store the (tiny) model hyper-parameters used by every test below."""
        lowerCamelCase_ = parent
        lowerCamelCase_ = batch_size
        lowerCamelCase_ = image_size
        lowerCamelCase_ = patch_size
        lowerCamelCase_ = num_channels
        lowerCamelCase_ = embed_dim
        lowerCamelCase_ = depths
        lowerCamelCase_ = num_heads
        lowerCamelCase_ = window_size
        lowerCamelCase_ = mlp_ratio
        lowerCamelCase_ = qkv_bias
        lowerCamelCase_ = hidden_dropout_prob
        lowerCamelCase_ = attention_probs_dropout_prob
        lowerCamelCase_ = drop_path_rate
        lowerCamelCase_ = hidden_act
        lowerCamelCase_ = use_absolute_embeddings
        lowerCamelCase_ = patch_norm
        lowerCamelCase_ = layer_norm_eps
        lowerCamelCase_ = initializer_range
        lowerCamelCase_ = is_training
        lowerCamelCase_ = scope
        lowerCamelCase_ = use_labels
        lowerCamelCase_ = type_sequence_label_size
        lowerCamelCase_ = encoder_stride

    def snake_case ( self ):
        """Return (config, pixel_values, labels) with random pixel values."""
        lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCamelCase_ = None
        if self.use_labels:
            lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        lowerCamelCase_ = self.get_config()

        return config, pixel_values, labels

    def snake_case ( self ):
        """Build a SwinvaConfig from the stored hyper-parameters."""
        # NOTE(review): "path_norm" looks like a typo for "patch_norm" — confirm
        # against the SwinvaConfig signature (an unknown kwarg would be ignored
        # or rejected depending on the config implementation).
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Bare backbone: check last_hidden_state shape (seq len shrinks 4x per stage)."""
        lowerCamelCase_ = SwinvaModel(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase )

        lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Masked-image-modeling head: reconstruction logits match image shape; also greyscale."""
        lowerCamelCase_ = SwinvaForMaskedImageModeling(config=UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        lowerCamelCase_ = 1
        lowerCamelCase_ = SwinvaForMaskedImageModeling(UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()

        lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase_ = model(UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Classification head: logits are (batch, num_labels)."""
        lowerCamelCase_ = self.type_sequence_label_size
        lowerCamelCase_ = SwinvaForImageClassification(UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()
        lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def snake_case ( self ):
        """Adapt prepare_config_and_inputs to the common-test dict format."""
        lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = config_and_inputs
        lowerCamelCase_ = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
    """Common model/pipeline test harness for the Swinv2 model family."""

    # Model classes exercised by the common tests (empty without torch).
    _lowerCamelCase = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    # Pipeline-task to model-class mapping for the pipeline tests.
    _lowerCamelCase = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    def snake_case ( self ):
        """Create the model tester and a ConfigTester for SwinvaConfig."""
        lowerCamelCase_ = SwinvaModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , embed_dim=37 )

    def snake_case ( self ):
        """Run the standard config (de)serialization and init checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case ( self ):
        """Backbone forward-pass shape check."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase )

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
    def snake_case ( self ):
        """Intentionally skipped (PyTorch 2.0.0 CUDA bug)."""
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds" )
    def snake_case ( self ):
        """Intentionally skipped (vision model, no inputs_embeds)."""
        pass

    def snake_case ( self ):
        """Input embeddings are an nn.Module; output embeddings are None or Linear."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCamelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )

    def snake_case ( self ):
        """forward() takes ``pixel_values`` as its first argument for every class."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(UpperCamelCase )
            lowerCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ = [*signature.parameters.keys()]

            lowerCamelCase_ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase )

    def snake_case ( self ):
        """Attention outputs: count, window-sized shapes, and output ordering."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = True

        for model_class in self.all_model_classes:
            lowerCamelCase_ = True
            lowerCamelCase_ = False
            lowerCamelCase_ = True
            lowerCamelCase_ = model_class(UpperCamelCase )
            model.to(UpperCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
            lowerCamelCase_ = outputs.attentions
            lowerCamelCase_ = len(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCamelCase_ = True
            lowerCamelCase_ = config.window_size**2
            lowerCamelCase_ = model_class(UpperCamelCase )
            model.to(UpperCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
            lowerCamelCase_ = outputs.attentions
            self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )

            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            lowerCamelCase_ = len(UpperCamelCase )

            # Check attention is always last and order is fine
            lowerCamelCase_ = True
            lowerCamelCase_ = True
            lowerCamelCase_ = model_class(UpperCamelCase )
            model.to(UpperCamelCase )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )

            if hasattr(self.model_tester , "num_hidden_states_types" ):
                lowerCamelCase_ = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowerCamelCase_ = 2
            self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) )

            lowerCamelCase_ = outputs.attentions

            self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )

            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )

    def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        """Shared helper: verify hidden_states count/shape and reshaped_hidden_states."""
        lowerCamelCase_ = model_class(UpperCamelCase )
        model.to(UpperCamelCase )
        model.eval()

        with torch.no_grad():
            lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )

        lowerCamelCase_ = outputs.hidden_states

        lowerCamelCase_ = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )

        # Swinv2 has a different seq_length
        lowerCamelCase_ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

        lowerCamelCase_ = outputs.reshaped_hidden_states
        self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )

        lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = reshaped_hidden_states[0].shape
        lowerCamelCase_ = (
            reshaped_hidden_states[0].view(UpperCamelCase , UpperCamelCase , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def snake_case ( self ):
        """Hidden states exposed both via kwarg and via config flag."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()

        lowerCamelCase_ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            lowerCamelCase_ = True
            self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True

            self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )

    def snake_case ( self ):
        """Same hidden-states checks when the image is padded up to a patch multiple."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = 3

        lowerCamelCase_ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCamelCase_ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            lowerCamelCase_ = True
            self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ = True
            self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) )

    def snake_case ( self ):
        """Masked-image-modeling head shape checks."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase )

    def snake_case ( self ):
        """Image-classification head shape checks."""
        lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )

    @slow
    def snake_case ( self ):
        """Hub smoke test: the first published checkpoint loads."""
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ = SwinvaModel.from_pretrained(UpperCamelCase )
            self.assertIsNotNone(UpperCamelCase )

    def snake_case ( self ):
        """With zeroed init ranges, non-embedding params are exactly 0.0 or 1.0."""
        lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase_ = _config_zero_init(UpperCamelCase )
        for model_class in self.all_model_classes:
            lowerCamelCase_ = model_class(config=UpperCamelCase )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )


@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
    """Slow integration test against the published tiny Swinv2 checkpoint."""

    @cached_property
    def snake_case ( self ):
        """Image processor for the reference checkpoint (None without vision deps)."""
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
            if is_vision_available()
            else None
        )

    @slow
    def snake_case ( self ):
        """Classification forward pass on the COCO cats image; checks shape and first logits."""
        lowerCamelCase_ = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
            UpperCamelCase )
        lowerCamelCase_ = self.default_image_processor

        lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        lowerCamelCase_ = image_processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )

        # forward pass
        with torch.no_grad():
            lowerCamelCase_ = model(**UpperCamelCase )

        # verify the logits
        lowerCamelCase_ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase )
        lowerCamelCase_ = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
675
'''Processor for OWL-ViT: wraps a CLIP tokenizer and the OwlViT image processor.

NOTE(review): identifiers were mechanically renamed (``snake_case`` /
``lowerCamelCase_`` / ``UpperCamelCase``). Multiple parameters in one
signature share the name ``UpperCamelCase`` (a duplicate-argument
SyntaxError), and references such as ``kwargs``, ``image_processor``,
``tokenizer``, ``text``, ``query_images``, ``images``, ``encodings``,
``encoding`` and the base class ``lowercase`` (presumably
``ProcessorMixin``) dangle. Restore from the upstream
``transformers/models/owlvit/processing_owlvit.py`` before running.
'''
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class snake_case ( lowercase ):
    """Combines an OwlViT image processor and a CLIP tokenizer into one processor."""

    # ProcessorMixin configuration: attribute names and backing classes.
    _lowerCamelCase = ["image_processor", "tokenizer"]
    _lowerCamelCase = "OwlViTImageProcessor"
    _lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ):
        """Accept image_processor/tokenizer; honour the deprecated ``feature_extractor`` kwarg."""
        lowerCamelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , UpperCamelCase , )
            lowerCamelCase_ = kwargs.pop("feature_extractor" )

        # Fall back to the deprecated argument, then validate both components.
        lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(UpperCamelCase , UpperCamelCase )

    def __call__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="max_length" , UpperCamelCase="np" , **UpperCamelCase ):
        """Encode text queries and/or (query) images into a single BatchEncoding.

        Text may be a string, a list of strings (one query per image) or a
        nested list of strings (several queries per image); nested lists are
        padded with " " entries up to the per-batch maximum query count.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )):
                lowerCamelCase_ = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )]

            elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ):
                lowerCamelCase_ = []

                # Maximum number of queries across batch
                lowerCamelCase_ = max([len(UpperCamelCase ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(UpperCamelCase ) != max_num_queries:
                        lowerCamelCase_ = t + [" "] * (max_num_queries - len(UpperCamelCase ))

                    lowerCamelCase_ = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
                    encodings.append(UpperCamelCase )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )

            # Concatenate the per-sample encodings into one tensor per field,
            # using whichever tensor backend was requested (and is installed).
            if return_tensors == "np":
                lowerCamelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowerCamelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowerCamelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                lowerCamelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowerCamelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                lowerCamelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            else:
                raise ValueError("Target return tensor type could not be returned" )

            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = input_ids
            lowerCamelCase_ = attention_mask

        if query_images is not None:
            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = self.image_processor(
                UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values
            lowerCamelCase_ = query_pixel_values

        if images is not None:
            lowerCamelCase_ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )

        # Merge pixel values into the text/query encoding, or return whichever
        # single modality was provided.
        if text is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's ``post_process``."""
        return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's ``post_process_object_detection``."""
        return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the image processor's ``post_process_image_guided_detection``."""
        return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )

    def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )

    @property
    def snake_case ( self ):
        """Deprecated alias for ``image_processor_class`` (warns, removed in v5)."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase , )
        return self.image_processor_class

    @property
    def snake_case ( self ):
        """Deprecated alias for ``image_processor`` (warns, removed in v5)."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , )
        return self.image_processor
675
1
'''Check whether every character in a string is unique, using an int bitmap.'''

# Exported explicitly so the (dunder-named) function survives `import *`.
__all__ = ["__snake_case"]


def __snake_case(UpperCAmelCase_: str) -> bool:
    """Return True iff no character occurs more than once in ``UpperCAmelCase_``.

    An arbitrary-precision int is used as a bitmap: bit ``ord(ch)`` is set the
    first time ``ch`` is seen; finding that bit already set means a duplicate.
    Runs in O(len(s)) character visits; the empty string is trivially unique.
    """
    # Fix: the original iterated an undefined `input_str`, called ord() on the
    # whole argument, and never bound `bitmap`/`ch_unicode`/`ch_bit_index_on`
    # (every assignment targeted the same mangled name).
    bitmap = 0
    for ch in UpperCAmelCase_:
        ch_unicode = ord(ch)  # bit index for this character
        ch_bit_index_on = pow(2, ch_unicode)  # mask with only that bit set
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
675
'''Unit tests for the repo utility ``utils/check_dummies.py``.

Fixes applied to the machine-mangled copy: restored the dangling names
(``git_repo_path``, ``objects``, ``dummy_files``, the template constants),
restored the ``check_dummies.PATH_TO_TRANSFORMERS`` alignment that the
"Align TRANSFORMERS_PATH" comment refers to, and gave the four test methods
distinct ``test_*`` names (they all shared one name, so they shadowed each
other and unittest discovered none of them).
'''
import os
import sys
import unittest


# Root of the git repository (three levels up from this test file).
a_ : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
git_repo_path = a_
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
a_ : Tuple = os.path.join(git_repo_path, """src""", """transformers""")
check_dummies.PATH_TO_TRANSFORMERS = a_

# Expected dummy-object templates (must match the ones in check_dummies).
a_ : List[Any] = """
{0} = None
"""

a_ : Optional[Any] = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

a_ : str = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class snake_case ( unittest.TestCase ):
    """Tests for find_backend / read_init / create_dummy_object / create_dummy_files."""

    def test_find_backend( self ):
        """``find_backend`` extracts backend names from ``if not is_xxx_available():`` lines."""
        simple_backend = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
        self.assertIsNone(simple_backend )  # not a backend guard line

        simple_backend = find_backend(" if not is_tokenizers_available():" )
        self.assertEqual(simple_backend , "tokenizers" )

        simple_backend = find_backend(" if not is_tensorflow_text_available():" )
        self.assertEqual(simple_backend , "tensorflow_text" )

        # Compound guards join the backends with "_and_".
        double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
        self.assertEqual(double_backend , "sentencepiece_and_tokenizers" )

        double_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
        self.assertEqual(double_backend , "sentencepiece_and_tensorflow_text" )

        triple_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
        self.assertEqual(triple_backend , "sentencepiece_and_tokenizers_and_vision" )

    def test_read_init( self ):
        """``read_init`` returns a backend -> object-names mapping from the main __init__."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , objects )
        self.assertIn("tensorflow_text" , objects )
        self.assertIn("sentencepiece_and_tokenizers" , objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertModel" , objects["tf"] )
        self.assertIn("FlaxBertModel" , objects["flax"] )
        self.assertIn("BertModel" , objects["torch"] )
        self.assertIn("TFBertTokenizer" , objects["tensorflow_text"] )
        self.assertIn("convert_slow_tokenizer" , objects["sentencepiece_and_tokenizers"] )

    def test_create_dummy_object( self ):
        """``create_dummy_object`` renders constants, functions and classes correctly."""
        dummy_constant = create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(dummy_constant , "\nCONSTANT = None\n" )

        dummy_function = create_dummy_object("function" , "'torch'" )
        self.assertEqual(
            dummy_function , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files( self ):
        """``create_dummy_files`` renders a whole autogenerated dummy module per backend."""
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , expected_dummy_pytorch_file )
675
1
"""Compute the Hamming distance between two equal-length strings."""


def hamming_distance(string_a: str, string_b: str) -> int:
    """Return the number of positions at which the two strings differ.

    :raises ValueError: if the strings do not have the same length.

    >>> hamming_distance("karolin", "kathrin")
    3
    >>> hamming_distance("", "")
    0
    """
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    # The original loop bound both zip targets to the same name, so the
    # comparison was always False; compare distinct characters instead.
    return sum(ch_a != ch_b for ch_a, ch_b in zip(string_a, string_b))


# Backward-compatible alias for the original (mangled) function name.
__snake_case = hamming_distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()
675
"""Dummy placeholder raising a helpful error when the `onnx` backend is missing."""

from ..utils import DummyObject, requires_backends


class snake_case(metaclass=DummyObject):
    """Stub standing in for the real class when `onnx` is not installed.

    Fixes from review: the original metaclass name (`lowercase`) was
    undefined — `DummyObject` is the metaclass this dummy pattern relies
    on; and the backend list must live in `_backends`, the attribute
    `DummyObject`/`requires_backends` read (it was mangled to
    `_lowerCamelCase`).
    """

    # Backends that must be installed before the real class can be used.
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    # NOTE(review): both classmethods below carry the same (mangled) name,
    # so the second definition shadows the first; in upstream dummy modules
    # these are typically `from_config` and `from_pretrained` — confirm the
    # real names before renaming.
    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def snake_case(cls, *args, **kwargs):  # noqa: F811
        requires_backends(cls, ["onnx"])
675
1
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer lowerCamelCase_ = flax_key_tuple[:-1] + ("weight",) lowerCamelCase_ = torch.permute(UpperCAmelCase_ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCAmelCase_ ): # linear layer lowerCamelCase_ = flax_key_tuple[:-1] + ("weight",) lowerCamelCase_ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ = flax_key_tuple[:-1] + ("weight",) return flax_key_tuple, flax_tensor def __snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ): if "metadata" in layer: lowerCamelCase_ = layer.split("metadata" ) lowerCamelCase_ = "".join(split_layer[0] )[:-1] lowerCamelCase_ = [tuple(("metadata" + split_layer[1]).split("/" ) )] elif "kvstore" in layer: lowerCamelCase_ = layer.split("kvstore" ) lowerCamelCase_ = "".join(split_layer[0] )[:-1] lowerCamelCase_ = [tuple(("kvstore" + split_layer[1]).split("/" ) )] else: lowerCamelCase_ = layer.split("/" ) lowerCamelCase_ = "/".join(split_layer[:-1] ) lowerCamelCase_ = (split_layer[-1],) if "kvstore/path" in layer: lowerCamelCase_ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: lowerCamelCase_ = "file" else: lowerCamelCase_ = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def __snake_case ( UpperCAmelCase_ : str , 
UpperCAmelCase_ : Tuple ): lowerCamelCase_ = rename_keys(UpperCAmelCase_ ) lowerCamelCase_ = {} for k, v in current_block.items(): lowerCamelCase_ = v lowerCamelCase_ = new_current_block torch.save(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str = WEIGHTS_NAME ): lowerCamelCase_ = convert_file_size_to_int(UpperCAmelCase_ ) lowerCamelCase_ = [] lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = 0 os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp: lowerCamelCase_ = serialization.msgpack_restore(fp.read() )["optimizer"]["target"] lowerCamelCase_ = flatten_dict(UpperCAmelCase_ , sep="/" ) lowerCamelCase_ = {} for layer in checkpoint_info.keys(): lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = get_key_and_tensorstore_dict( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if curr_real_layer_name in all_layers: lowerCamelCase_ = content else: lowerCamelCase_ = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file lowerCamelCase_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() lowerCamelCase_ = torch.tensor(UpperCAmelCase_ ) lowerCamelCase_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts lowerCamelCase_ ,lowerCamelCase_ = rename_base_flax_keys(tuple(key.split("/" ) ) , UpperCAmelCase_ ) lowerCamelCase_ = "/".join(UpperCAmelCase_ ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: lowerCamelCase_ = os.path.join( UpperCAmelCase_ , weights_name.replace(".bin" , F'''-{len(UpperCAmelCase_ )+1:05d}-of-???.bin''' ) ) rename_and_save_block(UpperCAmelCase_ , UpperCAmelCase_ ) sharded_state_dicts.append(current_block.keys() ) del current_block lowerCamelCase_ = {} lowerCamelCase_ = 0 lowerCamelCase_ = raw_weights.to(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) current_block_size += weight_size total_size += weight_size # Add the last block lowerCamelCase_ = os.path.join(UpperCAmelCase_ , weights_name.replace(".bin" , F'''-{len(UpperCAmelCase_ )+1:05d}-of-???.bin''' ) ) rename_and_save_block(UpperCAmelCase_ , UpperCAmelCase_ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(UpperCAmelCase_ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index lowerCamelCase_ = {} lowerCamelCase_ = {} for idx, shard in enumerate(UpperCAmelCase_ ): lowerCamelCase_ = weights_name.replace( ".bin" , F'''-{idx+1:05d}-of-{len(UpperCAmelCase_ ):05d}.bin''' ) # len(sharded_state_dicts):05d} lowerCamelCase_ = os.path.join(UpperCAmelCase_ , weights_name.replace(".bin" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) lowerCamelCase_ = shard for key in shard: lowerCamelCase_ = shard_file # Add the metadata lowerCamelCase_ = {"total_size": total_size} lowerCamelCase_ = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , "w" , encoding="utf-8" ) as f: lowerCamelCase_ = json.dumps(UpperCAmelCase_ , indent=2 , sort_keys=UpperCAmelCase_ ) + "\n" f.write(UpperCAmelCase_ ) return metadata, index if __name__ == "__main__": a_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", 
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""") parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""", type=str, required=False, help="""Path to the output pytorch model.""", ) a_ : Union[str, Any] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def __snake_case ( ): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer lowerCamelCase_ = SwitchTransformersConfig.from_pretrained("google/switch-base-8" ) config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" ) lowerCamelCase_ = SwitchTransformersForConditionalGeneration.from_pretrained( "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" ) lowerCamelCase_ = TaTokenizer.from_pretrained("t5-small" ) lowerCamelCase_ = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>." lowerCamelCase_ = tokenizer(UpperCAmelCase_ , return_tensors="pt" ).input_ids lowerCamelCase_ = model.generate(UpperCAmelCase_ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
675
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=1000 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope 
lowerCamelCase_ = range_bbox def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ = bbox[i, j, 3] lowerCamelCase_ = bbox[i, j, 1] lowerCamelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ = bbox[i, j, 2] lowerCamelCase_ = bbox[i, j, 0] lowerCamelCase_ = t lowerCamelCase_ = tf.convert_to_tensor(UpperCamelCase ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForSequenceClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase 
, token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForQuestionAnswering(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = 10 def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) 
def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFLayoutLMModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def snake_case ( self ): """simple docstring""" pass def __snake_case ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCamelCase_ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCamelCase_ = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) lowerCamelCase_ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the sequence output on [0, :3, :3] lowerCamelCase_ = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCamelCase_ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase , atol=1e-3 ) ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized sequence classification head lowerCamelCase_ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCamelCase_ = outputs.loss lowerCamelCase_ = (2,) self.assertEqual(loss.shape , UpperCamelCase ) # test the shape 
of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = (2, 2) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , UpperCamelCase ) self.assertEqual(outputs.end_logits.shape , UpperCamelCase )
675
1
"""Prepare whole-word-masking reference files for Chinese BERT training.

Pure helpers (`is_chinese`, `get_chinese_word`, `add_sub_symbol`) are usable
without LTP/transformers installed; the heavy dependencies are imported
lazily inside `main`.
"""

import argparse
import json
from typing import List


def _is_chinese_char(cp: int) -> bool:
    """Return True if code point *cp* falls in a CJK ideograph block."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    return bool(
        (0x4E00 <= cp <= 0x9FFF)
        or (0x3400 <= cp <= 0x4DBF)
        or (0x20000 <= cp <= 0x2A6DF)
        or (0x2A700 <= cp <= 0x2B73F)
        or (0x2B740 <= cp <= 0x2B81F)
        or (0x2B820 <= cp <= 0x2CEAF)
        or (0xF900 <= cp <= 0xFAFF)
        or (0x2F800 <= cp <= 0x2FA1F)
    )


def is_chinese(word: str) -> int:
    """Return 1 if *word* consists only of CJK characters, else 0.

    Examples of non-Chinese "words": '180' or mixed ASCII tokens.
    """
    for char in word:
        if not _is_chinese_char(ord(char)):
            return 0
    return 1


def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collect the multi-character all-CJK words appearing in *tokens*."""
    word_set = set()
    for token in tokens:
        # Single characters are not "words" for whole-word masking purposes.
        if len(token) > 1 and is_chinese(token):
            word_set.add(token)
    return list(word_set)


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Mark BERT subword pieces of known Chinese words with a '##' prefix.

    Greedily matches the longest word from *chinese_word_set* starting at
    each CJK position and prefixes every character after the first with
    '##'. NOTE: mutates and returns *bert_tokens* in place.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            window = min(end - start, max_word_len)
            # Try the longest candidate first.
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: "LTP", bert_tokenizer: "BertTokenizer") -> List[List[int]]:
    """For each line, return the positions of '##'-marked CJK subword tokens."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        seg = ltp_tokenizer.seg(lines[i : i + 100])[0]
        ltp_res.extend(get_chinese_word(r) for r in seg)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        enc = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(enc["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = [bert_tokenizer._convert_id_to_token(token_id) for token_id in input_ids]
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args) -> None:
    """Read `args.file_name`, compute reference positions, write JSON lines."""
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext
    # (https://github.com/ymcui/Chinese-BERT-wwm). If we want to fine-tune these
    # models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    from ltp import LTP
    from transformers import BertTokenizer

    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]

    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        f.writelines(json.dumps(ref) + "\n" for ref in ref_ids)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
675
"""Check (and optionally fix) sorting of the docs table of contents."""

import argparse
from collections import defaultdict


# Path to the table-of-contents file this script validates.
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Return *doc_list* deduplicated and sorted by title, "Overview" first.

    Entries sharing a "local" key are merged (their titles must agree);
    raises ValueError on conflicting titles or more than one overview doc.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        # "overview" gets special treatment and is always first.
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys ("local" may be absent for external links).
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Validate the 'Schedulers' section of the TOC; rewrite it if *overwrite*."""
    import yaml  # local import so the pure helper above works without PyYAML

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]

    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Validate the 'Pipelines' section of the TOC; rewrite it if *overwrite*."""
    import yaml  # local import so the pure helper above works without PyYAML

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
675
1
"""A simple keyed substitution cipher: the key is repeated to the message
length, and each letter is shifted by the corresponding key letter's index
(spaces pass through unchanged and do not consume key letters)."""
from string import ascii_uppercase

# Letter -> index (A=0 ... Z=25) and the inverse index -> letter table.
# Bug fix: the original bound BOTH tables to the same name, so the
# letter->index dict was shadowed and lookups failed.
LETTER_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_LETTER = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat `key` cyclically until it is as long as `message`.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with the (already expanded) key `key_new`.

    Each letter becomes (message_index - key_index) mod 26; spaces are kept
    and do not advance the key position.
    """
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (LETTER_TO_INDEX[letter] - LETTER_TO_INDEX[key_new[i]]) % 26
            i += 1
            cipher_text += INDEX_TO_LETTER[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt `cipher_text` with the expanded key `key_new` (inverse of
    `cipher_text`: (cipher_index + key_index) mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (LETTER_TO_INDEX[letter] + LETTER_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            or_txt += INDEX_TO_LETTER[x]
    return or_txt


def main() -> None:
    """Demonstrate a full encrypt/decrypt round trip."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
675
"""Compute and pickle per-example token lengths for the train/val seq2seq
datasets, so that dynamic batching can group examples by length."""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the length (in non-pad tokens) of every train/val example to each
    dataset's ``len_file``.

    Args:
        tokenizer_name: model id or path passed to ``AutoTokenizer.from_pretrained``.
        data_dir: dataset directory understood by ``SeqaSeqDataset``.
        max_source_length / max_target_length: truncation limits for the dataset.
        consider_target: when True, store ``max(src_len, tgt_len)`` per example;
            otherwise store only the source length.
        **kwargs: forwarded to ``SeqaSeqDataset``.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # Iterate the whole dataset once; shuffle must stay False so lengths
        # line up with dataset order.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # Count non-pad tokens per row.
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    # Bug fix: `fire.Fire(save_len_file)` referenced a name that no longer
    # existed after the function was renamed; the definition is restored.
    fire.Fire(save_len_file)
675
1
"""Lazy import structure for the RoBERTa-PreLayerNorm model.

Only the configuration is imported eagerly; the PyTorch / TensorFlow / Flax
modeling modules are registered conditionally on backend availability and
loaded on first attribute access via `_LazyModule`.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Bug fix: the original assigned each list to a throwaway name while the
# `_LazyModule(...)` call below referenced `_import_structure`; the canonical
# dict-building pattern is restored so the module actually imports.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
675
"""Keyword substitution cipher: a keyword seeds the cipher alphabet and the
remaining letters are filled in, skipping letters already used by the key."""


def remove_duplicates(key: str) -> str:
    """Return `key` with repeated alphabetic characters removed.

    Note the original precedence is preserved: spaces are always kept
    (`ch == " " or (ch not in key_no_dups and ch.isalpha())`).
    """
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build the plaintext->ciphertext letter map seeded by `key`.

    The de-duplicated upper-case key occupies the first positions of the
    cipher alphabet; the rest of the alphabet follows, skipping any letter
    that already appears in the key.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encrypt `message` (upper-cased); characters not in the map pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decrypt `message` by inverting `cipher_map`; unknown characters pass through."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive driver: read a message, a keyword, and a mode, then print the result."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        # Bug fix: the dispatch dict must reference the real function names.
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
675
1
"""Processor for LayoutXLM: couples a LayoutLMv2 image processor (OCR + pixel
values) with a LayoutXLM tokenizer behind a single callable interface."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    """
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor
    and a LayoutXLM tokenizer into a single processor.

    The image processor optionally runs OCR to obtain words and normalized
    bounding boxes, which are then fed (together with any user-provided text,
    boxes, or word labels) to the tokenizer.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Bug fix: `feature_extractor` must be pre-bound, otherwise the
        # fallback below raises NameError when the kwarg is absent.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Run the image processor on `images`, then the tokenizer on the OCR'd
        (or user-supplied) words/boxes, and return one `BatchEncoding` that
        also carries the images under the "image" key.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f''' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}'''
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
675
"""Tests for the OpenAI GPT tokenizer (slow BPE implementation and fast
tokenizers backend)."""
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # NOTE(review): attribute names follow the TokenizerTesterMixin convention;
    # the original names were mangled by obfuscation — confirm against the mixin.
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the temp dir used by the mixin."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        """BPE tokenization and token->id conversion on the toy vocab."""
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        """Padding must raise: this tokenizer has no padding token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Intentionally skipped: the tokenizer has no padding token.
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same tests, run with the ftfy + spacy text pre-processing path available."""

    pass
675
1
"""Tests for the DEIS multistep scheduler."""
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DEISMultistepSchedulerTest(SchedulerCommonTest):
    # NOTE(review): `scheduler_classes` / `forward_default_kwargs` are the names
    # the method bodies below reference; test method names were reconstructed.
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Default DEIS config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler config and check stepping is unchanged."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs / check_over_forward for this scheduler.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload the config and check a single forward step matches."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a full 10-step denoising loop and return the final sample."""
        # Bug fix: the original re-created a fresh scheduler unconditionally
        # right after the `if scheduler is None` branch, silently discarding a
        # caller-supplied scheduler (making test_switch vacuous). Only create
        # one when none was passed.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """Stepping at two different timesteps preserves the sample shape."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_orders_and_types(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # Bug fix: the original asserted against the mangled name
        # `torch.floataa`; half-precision inputs must stay float16.
        assert sample.dtype == torch.float16
675
"""Fast tokenizer for RoFormer: a BERT-style WordPiece tokenizer whose
pre-tokenizer segments Chinese text with jieba (restored after pickling and
swapped back to `BertPreTokenizer` while saving, since the custom
pre-tokenizer is not serializable)."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

# Bug fix: these constants were all bound to the same throwaway name while the
# class body referenced VOCAB_FILES_NAMES etc.; canonical names restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by HuggingFace `tokenizers`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized state disagrees with
        # the requested lowercasing / accent stripping.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        """Swap in a picklable pre-tokenizer; the jieba one cannot be serialized."""
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        """Restore the custom jieba pre-tokenizer after unpickling."""
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """All zeros for a single sequence; zeros then ones for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        # The custom jieba pre-tokenizer cannot be serialized; temporarily use
        # the standard BERT pre-tokenizer while writing files to disk.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
675
1
"""Convert DeiT distilled checkpoints from the timm library to HuggingFace format."""

import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build the (timm_name, hf_name) rename pairs for a DeiT checkpoint."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value entries (in place)."""
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm DeiT model's weights into our DeiT structure,
    verify the logits match on a sample image, then save model + image processor.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # patch size and image size are encoded in the timm model name,
    # e.g. "..._patch16_224" -> patch_size=16, image_size=224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # config defaults already match the base architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
675
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=[10, 20, 30, 40] , UpperCamelCase=[2, 2, 3, 2] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=["stage2", "stage3", "stage4"] , UpperCamelCase=3 , UpperCamelCase=None , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = num_channels lowerCamelCase_ = num_stages lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = out_features lowerCamelCase_ = num_labels lowerCamelCase_ = scope lowerCamelCase_ = num_stages def snake_case ( self ): """simple docstring""" lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if 
self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def snake_case ( self ): """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , ) def snake_case ( self ): """simple docstring""" return UperNetConfig( backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCamelCase , loss_ignore_index=255 , num_labels=self.num_labels , ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = UperNetForSemanticSegmentation(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowerCamelCase_ = model(UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else () _lowerCamelCase = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def snake_case ( self ): """simple 
docstring""" lowerCamelCase_ = UperNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def snake_case ( self ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case ( self ): """simple docstring""" return def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase ) lowerCamelCase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase ) @unittest.skip(reason="UperNet does not use inputs_embeds" ) def snake_case ( self ): """simple docstring""" pass @unittest.skip(reason="UperNet does not support input and output embeddings" ) def snake_case ( self ): """simple docstring""" pass @unittest.skip(reason="UperNet does not have a base model" ) def snake_case ( self ): """simple docstring""" pass @unittest.skip(reason="UperNet does not have a base model" ) def snake_case ( self ): """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def 
snake_case ( self ): """simple docstring""" pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = _config_zero_init(UpperCamelCase ) lowerCamelCase_ = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(config=UpperCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) 
@unittest.skip(reason="UperNet does not have tied weights" ) def snake_case ( self ): """simple docstring""" pass @slow def snake_case ( self ): """simple docstring""" for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def __snake_case ( ): lowerCamelCase_ = hf_hub_download( repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" ) lowerCamelCase_ = Image.open(UpperCAmelCase_ ).convert("RGB" ) return image @require_torch @require_vision @slow class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" ) lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(UpperCamelCase ) lowerCamelCase_ = prepare_img() lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase ) with torch.no_grad(): lowerCamelCase_ = model(**UpperCamelCase ) lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowerCamelCase_ = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" ) lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(UpperCamelCase ) lowerCamelCase_ = prepare_img() lowerCamelCase_ = processor(images=UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase ) with torch.no_grad(): lowerCamelCase_ = model(**UpperCamelCase ) lowerCamelCase_ = torch.Size((1, 
model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowerCamelCase_ = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
675
1
"""Packaged dataset builder that reads pickled pandas DataFrames into Arrow tables."""

import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas: optional explicit feature schema."""

    # Target schema for casting; None keeps whatever pandas/Arrow infers.
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    """Arrow-based builder over files containing pickled pandas DataFrames."""

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Dataset metadata: only the (optional) feature schema is known up front."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # No per-split mapping: everything goes into a single TRAIN split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        """Cast a table to the configured features, if any."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (index, Arrow table) for each pickled-DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
675
'''simple docstring''' from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration a_ : Optional[int] = HfArgumentParser(InitializationArguments) a_ : str = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization a_ : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks a_ : str = { """vocab_size""": len(tokenizer), """scale_attn_by_inverse_layer_idx""": True, """reorder_and_upcast_attn""": True, } # Load model config (GPT-2 large in this case) a_ : Optional[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config a_ : Optional[Any] = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
675
1
"""RoFormer model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    """
    Configuration class for a RoFormer model.

    Instantiating with the defaults yields a configuration similar to the
    junnyu/roformer_chinese_base architecture.
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Fall back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether to apply rotary position embeddings on the value projections too.
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export of the three standard text inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
675
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version a_ : List[Any] = get_logger(__name__) class snake_case : """simple docstring""" _lowerCamelCase = "dummy_data" _lowerCamelCase = "datasets" _lowerCamelCase = False def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ): """simple docstring""" lowerCamelCase_ = 0 lowerCamelCase_ = dataset_name lowerCamelCase_ = cache_dir lowerCamelCase_ = use_local_dummy_data lowerCamelCase_ = config # download_callbacks take a single url as input lowerCamelCase_ = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root lowerCamelCase_ = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general lowerCamelCase_ = str(UpperCamelCase ) # to be downloaded lowerCamelCase_ = None lowerCamelCase_ = None @property def snake_case ( self ): """simple docstring""" if self._dummy_file is None: lowerCamelCase_ = self.download_dummy_data() return self._dummy_file @property def snake_case ( self ): """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def snake_case ( self ): """simple docstring""" return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) lowerCamelCase_ 
= cached_path( UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase ) return os.path.join(UpperCamelCase , self.dummy_file_name ) @property def snake_case ( self ): """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def snake_case ( self ): """simple docstring""" if self._bucket_url is None: lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def snake_case ( self ): """simple docstring""" # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def snake_case ( self , UpperCamelCase , *UpperCamelCase ): """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested lowerCamelCase_ = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned lowerCamelCase_ = self.dummy_file_name # special case when data_url is a dict if isinstance(UpperCamelCase , UpperCamelCase ): return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase ) elif isinstance(UpperCamelCase , (list, tuple) ): return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase ) else: return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase ) def snake_case ( self , UpperCamelCase , *UpperCamelCase ): """simple docstring""" return self.download_and_extract(UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" return self.download_and_extract(UpperCamelCase ) def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return path def snake_case ( self ): """simple docstring""" return {} def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple 
docstring""" lowerCamelCase_ = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(UpperCamelCase , UpperCamelCase ): for single_url in single_urls: download_callback(UpperCamelCase ) else: lowerCamelCase_ = single_urls download_callback(UpperCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls] else: lowerCamelCase_ = single_urls lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) lowerCamelCase_ = value # make sure that values are unique if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url ) lowerCamelCase_ = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(UpperCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowerCamelCase_ = 
os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(UpperCamelCase ) return dummy_data_list def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" for download_callback in self.download_callbacks: download_callback(UpperCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self , UpperCamelCase ): """simple docstring""" def _iter_archive_members(UpperCamelCase ): # this preserves the order of the members inside the ZIP archive lowerCamelCase_ = Path(self.dummy_file ).parent lowerCamelCase_ = path.relative_to(UpperCamelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: lowerCamelCase_ = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(UpperCamelCase ) lowerCamelCase_ = Path(UpperCamelCase ) lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" ) def snake_case ( self , UpperCamelCase ): """simple docstring""" if not 
isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = [paths] for path in paths: if os.path.isfile(UpperCamelCase ): if os.path.basename(UpperCamelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(UpperCamelCase ): if os.path.basename(UpperCamelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(UpperCamelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(UpperCamelCase , UpperCamelCase )
675
1
"""Rabin-Karp substring search using a rolling polynomial hash."""

# Base of the polynomial hash (size of the extended-ASCII alphabet).
alphabet_size = 256
# Modulus to hash a string; a large prime keeps collisions rare.
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs as a substring of ``text``.

    Hashes the pattern once, then slides a window over ``text`` updating the
    window hash in O(1) per step (expected O(len(text)) overall).  A direct
    string comparison confirms every hash match, so collisions cannot cause
    false positives.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Hash of the pattern and of the first window of the text.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Rolling hash (https://en.wikipedia.org/wiki/Rolling_hash):
        # drop text[i], append text[i + p_len].
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """Smoke tests covering hits, misses and non-ASCII input."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
675
"""Project Euler problem 81: minimal path sum moving only right and down."""
import os


def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal right/down path sum for the grid stored in ``filename``.

    The file holds one comma-separated row of integers per line.  A relative
    ``filename`` is resolved against this script's directory (an absolute path
    is used as-is by ``os.path.join``).
    """
    # Resolve relative to the script, not the CWD, so the bundled matrix.txt
    # is found no matter where the script is launched from.
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    # dp[i][j] = minimal path sum reaching cell (i, j).
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    # First row / first column can only be reached from one direction.
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
675
1
"""Keeps the auto-generated "supported models" tips in the task guides in
sync with the auto-model mappings (run via ``make fix-copies``)."""
import argparse
import os

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return ``(text, start_index, end_index, lines)``.

    ``text`` is the content of ``filename`` strictly between the first line
    starting with ``start_prompt`` and the first subsequent line starting with
    ``end_prompt``, with surrounding blank lines trimmed; the indices delimit
    that region inside ``lines`` (the file's full line list).
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines (a line of length <= 1 is just a newline).
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map each task guide file to the auto-mapping listing the models supporting that task.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return a markdown-formatted ``[Name](../model_doc/code), ...`` list of
    every model supporting ``task_guide``, terminated by a newline."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Verify the auto-generated model list inside ``task_guide``.

    With ``overwrite=True`` the stale list is rewritten in place; otherwise a
    ``ValueError`` is raised telling the user to run ``make fix-copies``.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
675
"""Integration tests that launch accelerate's external-deps metrics script
under several device configurations and check it runs cleanly."""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class snake_case(unittest.TestCase):  # class name kept for compatibility with external references
    def setUp(self):
        """Locate test_metrics.py on disk and import it as a module."""
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        # Single-process CPU launch.
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        # Default multi-process CPU launch.
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        # Run directly on the single available GPU.
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        # Launch one process per GPU via torchrun.
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
675
1
"""Checks that one training step with a DDPM scheduler is numerically
equivalent to one with a DDIM scheduler under identical seeds."""
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow

# NOTE(review): reconstructed from the mangled `a_ : int = False`; upstream
# diffusers disables TF32 here for full determinism — confirm.
torch.backends.cuda.matmul.allow_tf32 = False


class snake_case(unittest.TestCase):  # class name kept for compatibility with external references
    def get_model_optimizer(self, resolution=32):
        """Return a freshly seeded small UNet and an SGD optimizer for it."""
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable

        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # NOTE(review): the original assert operands were lost in the mangling;
        # upstream compares the last noisy images and noise predictions — confirm.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
675
"""Tests for the seq2seq ROUGE helpers (calculate_rouge / calculate_rouge_path)."""
from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge

# Model predictions (summaries) and matching reference targets.
PRED = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]

TGT = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]

# NOTE(review): keyword boolean values below were lost in the identifier
# mangling and are reconstructed from the upstream test file — confirm.


def test_disaggregated_scores_are_determinstic():
    """Per-example (non-aggregated) scores should not depend on which keys were requested."""
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    """Sentence-splitting on newlines should improve rougeLsum."""
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    """newline_sep must not affect rouge1/rouge2/rougeL."""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    """Single-sentence inputs score identically with and without newline splitting."""
    hypotheses = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    test_case = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(hypotheses, test_case, newline_sep=True) == calculate_rouge(
        hypotheses, test_case, newline_sep=False
    )


def test_pegasus_newline():
    """Pegasus-style '<n>' outputs should score better with newline handling enabled."""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    """The CLI path helper returns aggregated dicts, or defaultdicts when not aggregating."""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
675
1
def binomial_coefficient(n: int, r: int) -> int:
    """Return the binomial coefficient C(n, r).

    Uses the Pascal's-triangle row-update trick: a single length-(r+1) row is
    updated n times right-to-left, giving O(n*r) time and O(r) space.
    """
    c = [0 for _ in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # Compute the current row from the previous one; iterating right to
        # left lets us update in place without clobbering values we still need.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
675
"""Convert original EnCodec checkpoints to the HF Transformers format."""
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)

# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

# Original-name -> HF-name maps; "*" is a wildcard layer index.
MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the parameter reached by walking ``key`` from
    ``hf_pointer``, after checking the shapes match; ``weight_type`` selects
    which attribute (weight/bias/LSTM gate/…) receives the tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}'''
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''')


def should_ignore(name, ignore_keys):
    """Return True if ``name`` matches any pattern in ``ignore_keys``
    (supports a trailing ``.*`` prefix match and an infix ``.*.`` wildcard)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    """Load every tensor of the original state dict into ``hf_model`` using the
    name-mapping table for ``model_name``; unmatched weights are logged."""
    unused_weights = []

    # NOTE: the previous `model_name == "encodec_24khz" or "encodec_32khz"` was
    # always truthy, making the 48khz branch unreachable; use a membership test.
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''')

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'''{name} was ignored''')
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')


@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Build the HF config/model for ``model_name``, load the original weights
    from ``checkpoint_path``, save everything to ``pytorch_dump_folder_path``,
    and optionally push to ``repo_id`` on the hub.

    NOTE(review): the config attribute names below were reconstructed from the
    upstream conversion script (the mangled source lost the assignment targets).
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''')

    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
675
1
"""VQ-VAE model: an Encoder/Decoder pair with a vector-quantized latent space."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the encode step: the (pre-quantization) latent tensor."""

    latents: torch.FloatTensor


class snake_case(ModelMixin, ConfigMixin):  # class name kept for compatibility with external references
    """VQ-VAE: encodes images to latents, quantizes them against a learned
    codebook, and decodes quantized latents back to images."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        # Codebook dimension defaults to the latent channel count.
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode ``x`` to (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Decode latents ``h``; quantizes first unless ``force_not_quantize``."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # Spatial norm conditions the decoder on the quantized latents.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
675
"""Read a `pyspark.sql.DataFrame` as a Hugging Face dataset."""
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Dataset reader backed by a Spark DataFrame.

    The underlying `Spark` builder materializes the DataFrame into the dataset
    cache, unless `streaming=True`, in which case rows are streamed lazily.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or materialized, per `self.streaming`)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # FORCE_REDOWNLOAD invalidates any previously cached copy of this dataset
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
675
1
"""Root finding with the secant method."""
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Return an approximate root of `function` found by the secant method.

    Args:
        function: Real-valued function of one real variable.
        x0, x1: Two distinct starting guesses near the root.

    Raises:
        ZeroDivisionError: If the secant slope degenerates (equal points or
            equal function values), which would divide by zero.
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # secant step: intersect the chord through (x_n, f(x_n)) and
        # (x_n1, f(x_n1)) with the x-axis
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:  # converged
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example polynomial: x^3 - 2x - 5 (real root near 2.0945515)."""
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
675
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Walks week by week (day += 7) from the first Sunday of 1901, carrying
    day/month/year by hand — including leap-year handling — and counting
    every time a step lands on the 1st of a month.
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # first Sunday of 1901 is 6 January
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
675
1
"""BLEURT metric: wraps the google-research `bleurt` scorer for `datasets`."""
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """Metric wrapper: downloads a BLEURT checkpoint and scores candidates vs references.

    The `_info` / `_download_and_prepare` / `_compute` names are the hook methods
    required by the `datasets.Metric` API.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        # checkpoint names are case-normalized both ways because the table mixes cases
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
675
"""Deformable DETR model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    """Configuration for a Deformable DETR model.

    Stores the backbone choice, the encoder/decoder transformer dimensions,
    the deformable-attention specific knobs, and the Hungarian-matcher / loss
    coefficients. All arguments are persisted on the config object.
    """

    model_type = "deformable_detr"
    # common-attribute aliases expected by the shared modeling utilities
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        # a timm backbone and an explicit HF backbone config are mutually exclusive
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # re-hydrate a serialized backbone config into its config class
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias used by shared modeling utilities."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias used by shared modeling utilities."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, flattening the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
675
1
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Return the fixed monthly payment (EMI) for a loan.

    EMI = P * r * (1 + r)^n / ((1 + r)^n - 1), where r is the monthly rate
    and n the number of monthly payments.

    Args:
        principal: Amount borrowed; must be > 0.
        rate_per_annum: Yearly interest rate as a fraction (e.g. 0.12); must be >= 0.
        years_to_repay: Repayment period in whole years; must be an int > 0.

    Raises:
        Exception: If any argument is out of its valid range.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
675
"""Convert a PyTorch Lightning Longformer QA checkpoint to a 🤗 Transformers checkpoint."""
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper matching the layout of the original training module,
    so the Lightning checkpoint's state dict keys line up on load."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because Lightning requires it; never called during conversion
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Load a Lightning QA checkpoint and re-save it as a `LongformerForQuestionAnswering` model.

    Args:
        longformer_model: Model identifier of the base Longformer.
        longformer_question_answering_ckpt_path: Path to the Lightning checkpoint.
        pytorch_dump_folder_path: Output folder for the converted model.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
675
1
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=1000 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope 
lowerCamelCase_ = range_bbox def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ = bbox[i, j, 3] lowerCamelCase_ = bbox[i, j, 1] lowerCamelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ = bbox[i, j, 2] lowerCamelCase_ = bbox[i, j, 0] lowerCamelCase_ = t lowerCamelCase_ = tf.convert_to_tensor(UpperCamelCase ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForSequenceClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase 
, token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForQuestionAnswering(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = 10 def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) 
def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFLayoutLMModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def snake_case ( self ): """simple docstring""" pass def __snake_case ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCamelCase_ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCamelCase_ = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) lowerCamelCase_ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the sequence output on [0, :3, :3] lowerCamelCase_ = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCamelCase_ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase , atol=1e-3 ) ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized sequence classification head lowerCamelCase_ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCamelCase_ = outputs.loss lowerCamelCase_ = (2,) self.assertEqual(loss.shape , UpperCamelCase ) # test the shape 
of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = (2, 2) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , UpperCamelCase ) self.assertEqual(outputs.end_logits.shape , UpperCamelCase )
675
"""Lazy-import module definition for the CTRL model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# maps submodule name -> public names; consumed by _LazyModule below
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

# torch-only modeling classes are only advertised when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

# same gating for the TensorFlow implementations
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy so heavy submodules import on first use
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
675
1
"""Circular singly linked list.

Fixes over the mangled original: both classes shared the obfuscated name
``snake_case`` while the list code called ``Node(...)`` (NameError);
multi-target assignments collapsed into ``x = x = new_node`` losing the
actual link updates; and the test harness referenced the undefined names
``CircularLinkedList`` / ``UpperCAmelCase_``. Reconstructed with correct
names and link bookkeeping.
"""
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    """A single element of the circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next: Node | None = None


class CircularLinkedList:
    """Singly linked list whose tail node points back to the head."""

    def __init__(self) -> None:
        self.head: Node | None = None  # first node, or None when empty
        self.tail: Node | None = None  # last node; tail.next is always head

    def __iter__(self) -> Iterator[Any]:
        """Yield each node's data exactly once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full lap around the ring
                break

    def __len__(self) -> int:
        """Number of nodes; O(n) because it walks the ring."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append *data* after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend *data* before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* at position *index* (0 <= index <= len(self)).

        Raises:
            IndexError: if *index* is out of range.
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # the only node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # inserted after the old tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head's data."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail's data."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position *index*.

        Raises:
            IndexError: if the list is empty or *index* is out of range.
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node: tail must skip over it
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # deleted the tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list holds no nodes."""
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise every CircularLinkedList operation end to end."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
675
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : Any = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class snake_case ( lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = GPTSwaTokenizer _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = False def snake_case ( self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "This is a test" lowerCamelCase_ = "This is a test" return input_text, output_text def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "<s>" lowerCamelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(UpperCamelCase ) , 2000 ) def snake_case ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2000 ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase ) lowerCamelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [465, 287, 265, 631, 842] ) lowerCamelCase_ 
= tokenizer.tokenize("I was born in 92000, and this is falsé." ) # fmt: off self.assertListEqual( UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) lowerCamelCase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) # fmt: off self.assertListEqual( UpperCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def snake_case ( self ): """simple docstring""" lowerCamelCase_ = GPTSwaTokenizer(UpperCamelCase ) lowerCamelCase_ = ["This is a test", "I was born in 92000, and this is falsé."] lowerCamelCase_ = [ [465, 287, 265, 631, 842], [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ): self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase ) # Test that decode_fast returns the input text for text, token_ids in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. 
Cool", ] # fmt: off lowerCamelCase_ = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCamelCase , )
675
1
'''simple docstring''' import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class snake_case ( lowercase , lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = StableUnCLIPPipeline _lowerCamelCase = TEXT_TO_IMAGE_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false _lowerCamelCase = False def snake_case ( self ): """simple docstring""" lowerCamelCase_ = 32 lowerCamelCase_ = embedder_hidden_size # prior components torch.manual_seed(0 ) lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowerCamelCase_ = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase , num_layers=1 , ) torch.manual_seed(0 ) lowerCamelCase_ = DDPMScheduler( 
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=UpperCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) lowerCamelCase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase ) lowerCamelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowerCamelCase_ = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , ) torch.manual_seed(0 ) lowerCamelCase_ = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCamelCase , steps_offset=1 , ) torch.manual_seed(0 ) lowerCamelCase_ = AutoencoderKL() lowerCamelCase_ = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components 
def snake_case ( self , UpperCamelCase , UpperCamelCase=0 ): """simple docstring""" if str(UpperCamelCase ).startswith("mps" ): lowerCamelCase_ = torch.manual_seed(UpperCamelCase ) else: lowerCamelCase_ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowerCamelCase_ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase ) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCamelCase_ = pipe("anime turle" , generator=UpperCamelCase , output_type="np" ) lowerCamelCase_ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" torch.cuda.empty_cache() 
torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) lowerCamelCase_ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCamelCase_ = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) lowerCamelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
675
'''Processor for OWL-ViT: bundles an image processor and a CLIP tokenizer.

NOTE(review): this chunk is machine-mangled — every method is named
``snake_case``, locals are ``lowerCamelCase_`` and parameters ``UpperCamelCase``,
so several names read below (``kwargs``, ``text``, ``image_processor``,
``tokenizer``, ``encodings``, ``input_ids``, ``image_features`` ...) do not
match their (lost) assignment targets. Code left byte-identical; only
comments/docstrings were changed.
'''
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class snake_case(lowercase):
    """Combines an OwlViTImageProcessor and a CLIPTokenizer(/Fast) into one processor."""

    _lowerCamelCase = ["image_processor", "tokenizer"]
    _lowerCamelCase = "OwlViTImageProcessor"
    _lowerCamelCase = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, UpperCamelCase=None, UpperCamelCase=None, **UpperCamelCase):
        """Accepts (image_processor, tokenizer); `feature_extractor` kwarg is a deprecated alias."""
        lowerCamelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                UpperCamelCase,
            )
            lowerCamelCase_ = kwargs.pop("feature_extractor")
        # Fall back to the deprecated alias only when no image_processor was given.
        lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(UpperCamelCase, UpperCamelCase)

    def __call__(self, UpperCamelCase=None, UpperCamelCase=None, UpperCamelCase=None, UpperCamelCase="max_length", UpperCamelCase="np", **UpperCamelCase):
        """Tokenize text queries and/or preprocess (query) images into a BatchEncoding."""
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )
        if text is not None:
            if isinstance(UpperCamelCase, UpperCamelCase) or (isinstance(UpperCamelCase, UpperCamelCase) and not isinstance(text[0], UpperCamelCase)):
                # Single string or flat list of strings: one tokenizer call.
                lowerCamelCase_ = [self.tokenizer(UpperCamelCase, padding=UpperCamelCase, return_tensors=UpperCamelCase, **UpperCamelCase)]
            elif isinstance(UpperCamelCase, UpperCamelCase) and isinstance(text[0], UpperCamelCase):
                lowerCamelCase_ = []

                # Maximum number of queries across batch
                lowerCamelCase_ = max([len(UpperCamelCase) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(UpperCamelCase) != max_num_queries:
                        lowerCamelCase_ = t + [" "] * (max_num_queries - len(UpperCamelCase))

                    lowerCamelCase_ = self.tokenizer(UpperCamelCase, padding=UpperCamelCase, return_tensors=UpperCamelCase, **UpperCamelCase)
                    encodings.append(UpperCamelCase)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Concatenate the per-sample encodings along the batch axis for the
            # requested tensor framework ("np", "jax", "pt" or "tf").
            if return_tensors == "np":
                lowerCamelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                lowerCamelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowerCamelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                lowerCamelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowerCamelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                lowerCamelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowerCamelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                lowerCamelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = input_ids
            lowerCamelCase_ = attention_mask
        if query_images is not None:
            lowerCamelCase_ = BatchEncoding()
            lowerCamelCase_ = self.image_processor(
                UpperCamelCase, return_tensors=UpperCamelCase, **UpperCamelCase
            ).pixel_values
            lowerCamelCase_ = query_pixel_values
        if images is not None:
            lowerCamelCase_ = self.image_processor(UpperCamelCase, return_tensors=UpperCamelCase, **UpperCamelCase)
        # Attach pixel values when both modalities are present; otherwise return
        # whichever encoding was built above.
        if text is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCamelCase_ = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCamelCase), tensor_type=UpperCamelCase)

    def snake_case(self, *UpperCamelCase, **UpperCamelCase):
        """Forward to OwlViTImageProcessor.post_process."""
        return self.image_processor.post_process(*UpperCamelCase, **UpperCamelCase)

    def snake_case(self, *UpperCamelCase, **UpperCamelCase):
        """Forward to OwlViTImageProcessor.post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*UpperCamelCase, **UpperCamelCase)

    def snake_case(self, *UpperCamelCase, **UpperCamelCase):
        """Forward to OwlViTImageProcessor.post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*UpperCamelCase, **UpperCamelCase)

    def snake_case(self, *UpperCamelCase, **UpperCamelCase):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*UpperCamelCase, **UpperCamelCase)

    def snake_case(self, *UpperCamelCase, **UpperCamelCase):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*UpperCamelCase, **UpperCamelCase)

    @property
    def snake_case(self):
        """Deprecated alias for image_processor_class."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            UpperCamelCase,
        )
        return self.image_processor_class

    @property
    def snake_case(self):
        """Deprecated alias for image_processor."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            UpperCamelCase,
        )
        return self.image_processor
675
1
'''Fast tests for the DeepFloyd IF inpainting pipeline.

NOTE(review): this chunk is machine-mangled — the class and all test methods
are named ``snake_case`` (later defs shadow earlier ones), the base classes
are the undefined name ``lowercase`` (presumably the tester mixins imported
above), and locals/parameters were renamed so names like ``image``,
``mask_image``, ``generator`` and ``inputs`` no longer match their
assignment targets. Code left byte-identical; only comments/docstrings
were changed.
'''
import random
import unittest

import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class snake_case(lowercase, lowercase, unittest.TestCase):
    """Pipeline-tester harness configuration for IFInpaintingPipeline."""

    _lowerCamelCase = IFInpaintingPipeline
    # width/height are fixed by the dummy components, so drop them from the params.
    _lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    _lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    _lowerCamelCase = PipelineTesterMixin.required_optional_params - {"latents"}

    def snake_case(self):
        """Return the tiny dummy model components provided by the IF tester mixin."""
        return self._get_dummy_components()

    def snake_case(self, UpperCamelCase, UpperCamelCase=0):
        """Build deterministic pipeline inputs (image, mask, generator) for a device/seed."""
        if str(UpperCamelCase).startswith("mps"):
            # torch.Generator(device="mps") is unsupported; fall back to global seeding.
            lowerCamelCase_ = torch.manual_seed(UpperCamelCase)
        else:
            lowerCamelCase_ = torch.Generator(device=UpperCamelCase).manual_seed(UpperCamelCase)
        lowerCamelCase_ = floats_tensor((1, 3, 32, 32), rng=random.Random(UpperCamelCase)).to(UpperCamelCase)
        lowerCamelCase_ = floats_tensor((1, 3, 32, 32), rng=random.Random(UpperCamelCase)).to(UpperCamelCase)
        lowerCamelCase_ = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def snake_case(self):
        """xformers attention output should match default attention within 1e-3."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def snake_case(self):
        """Pipeline should round-trip save/load with optional components removed."""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def snake_case(self):
        """float16 save/load round-trip (loose tolerance, see note below)."""
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def snake_case(self):
        """Attention slicing should not change outputs beyond 1e-2."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def snake_case(self):
        """Pipeline should round-trip a local save/load."""
        self._test_save_load_local()

    def snake_case(self):
        """Batched and single inference should agree within 1e-2."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
675
"""Unit tests for ``utils/check_dummies.py`` (backend detection and dummy-object generation).

Fixes over the mangled original: the repo path was assigned to the throwaway
name ``a_`` while the code used ``git_repo_path`` (NameError at import);
``check_dummies.PATH_TO_TRANSFORMERS`` was never actually realigned; the
test class and its methods were named ``snake_case``, so unittest discovery
(which requires a ``test_`` prefix) silently ran nothing.
"""
import os
import sys
import unittest


# Repo root is three levels up from this test file; make utils/ importable.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

# Templates mirroring the ones used by create_dummy_object.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        """find_backend maps an init guard line to the backend name(s) it protects."""
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        """read_init collects backend-guarded objects from the main transformers init."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        """create_dummy_object emits the right stub for constants, functions and classes."""
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        """create_dummy_files renders one autogenerated stub module per backend."""
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
675
1
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a_ : Any = logging.get_logger(__name__) a_ : Dict = OrderedDict( [ ("""align""", """EfficientNetImageProcessor"""), ("""beit""", """BeitImageProcessor"""), ("""bit""", """BitImageProcessor"""), ("""blip""", """BlipImageProcessor"""), ("""blip-2""", """BlipImageProcessor"""), ("""bridgetower""", """BridgeTowerImageProcessor"""), ("""chinese_clip""", """ChineseCLIPImageProcessor"""), ("""clip""", """CLIPImageProcessor"""), ("""clipseg""", """ViTImageProcessor"""), ("""conditional_detr""", """ConditionalDetrImageProcessor"""), ("""convnext""", """ConvNextImageProcessor"""), ("""convnextv2""", """ConvNextImageProcessor"""), ("""cvt""", """ConvNextImageProcessor"""), ("""data2vec-vision""", """BeitImageProcessor"""), ("""deformable_detr""", """DeformableDetrImageProcessor"""), ("""deit""", """DeiTImageProcessor"""), ("""deta""", """DetaImageProcessor"""), ("""detr""", """DetrImageProcessor"""), ("""dinat""", """ViTImageProcessor"""), ("""donut-swin""", """DonutImageProcessor"""), ("""dpt""", """DPTImageProcessor"""), ("""efficientformer""", """EfficientFormerImageProcessor"""), ("""efficientnet""", """EfficientNetImageProcessor"""), ("""flava""", """FlavaImageProcessor"""), ("""focalnet""", """BitImageProcessor"""), ("""git""", """CLIPImageProcessor"""), ("""glpn""", """GLPNImageProcessor"""), ("""groupvit""", """CLIPImageProcessor"""), 
("""imagegpt""", """ImageGPTImageProcessor"""), ("""instructblip""", """BlipImageProcessor"""), ("""layoutlmv2""", """LayoutLMv2ImageProcessor"""), ("""layoutlmv3""", """LayoutLMv3ImageProcessor"""), ("""levit""", """LevitImageProcessor"""), ("""mask2former""", """Mask2FormerImageProcessor"""), ("""maskformer""", """MaskFormerImageProcessor"""), ("""mgp-str""", """ViTImageProcessor"""), ("""mobilenet_v1""", """MobileNetV1ImageProcessor"""), ("""mobilenet_v2""", """MobileNetV2ImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevitv2""", """MobileViTImageProcessor"""), ("""nat""", """ViTImageProcessor"""), ("""oneformer""", """OneFormerImageProcessor"""), ("""owlvit""", """OwlViTImageProcessor"""), ("""perceiver""", """PerceiverImageProcessor"""), ("""pix2struct""", """Pix2StructImageProcessor"""), ("""poolformer""", """PoolFormerImageProcessor"""), ("""regnet""", """ConvNextImageProcessor"""), ("""resnet""", """ConvNextImageProcessor"""), ("""sam""", """SamImageProcessor"""), ("""segformer""", """SegformerImageProcessor"""), ("""swiftformer""", """ViTImageProcessor"""), ("""swin""", """ViTImageProcessor"""), ("""swin2sr""", """Swin2SRImageProcessor"""), ("""swinv2""", """ViTImageProcessor"""), ("""table-transformer""", """DetrImageProcessor"""), ("""timesformer""", """VideoMAEImageProcessor"""), ("""tvlt""", """TvltImageProcessor"""), ("""upernet""", """SegformerImageProcessor"""), ("""van""", """ConvNextImageProcessor"""), ("""videomae""", """VideoMAEImageProcessor"""), ("""vilt""", """ViltImageProcessor"""), ("""vit""", """ViTImageProcessor"""), ("""vit_hybrid""", """ViTHybridImageProcessor"""), ("""vit_mae""", """ViTImageProcessor"""), ("""vit_msn""", """ViTImageProcessor"""), ("""xclip""", """CLIPImageProcessor"""), ("""yolos""", """YolosImageProcessor"""), ] ) a_ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __snake_case ( UpperCAmelCase_ : str ): 
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: lowerCamelCase_ = model_type_to_module_name(UpperCAmelCase_ ) lowerCamelCase_ = importlib.import_module(F'''.{module_name}''' , "transformers.models" ) try: return getattr(UpperCAmelCase_ , UpperCAmelCase_ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(UpperCAmelCase_ , "__name__" , UpperCAmelCase_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowerCamelCase_ = importlib.import_module("transformers" ) if hasattr(UpperCAmelCase_ , UpperCAmelCase_ ): return getattr(UpperCAmelCase_ , UpperCAmelCase_ ) return None def __snake_case ( UpperCAmelCase_ : Union[str, os.PathLike] , UpperCAmelCase_ : Optional[Union[str, os.PathLike]] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[Dict[str, str]] = None , UpperCAmelCase_ : Optional[Union[bool, str]] = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : bool = False , **UpperCAmelCase_ : str , ): lowerCamelCase_ = get_file_from_repo( UpperCAmelCase_ , UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , revision=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , ) if resolved_config_file is None: logger.info( "Could not locate the image processor configuration file, will try to use the model config instead." 
) return {} with open(UpperCAmelCase_ , encoding="utf-8" ) as reader: return json.load(UpperCAmelCase_ ) class snake_case : """simple docstring""" def __init__( self ): """simple docstring""" raise EnvironmentError( "AutoImageProcessor is designed to be instantiated " "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(UpperCamelCase ) def snake_case ( cls , UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = kwargs.pop("config" , UpperCamelCase ) lowerCamelCase_ = kwargs.pop("trust_remote_code" , UpperCamelCase ) lowerCamelCase_ = True lowerCamelCase_ ,lowerCamelCase_ = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase , **UpperCamelCase ) lowerCamelCase_ = config_dict.get("image_processor_type" , UpperCamelCase ) lowerCamelCase_ = None if "AutoImageProcessor" in config_dict.get("auto_map" , {} ): lowerCamelCase_ = config_dict["auto_map"]["AutoImageProcessor"] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: lowerCamelCase_ = config_dict.pop("feature_extractor_type" , UpperCamelCase ) if feature_extractor_class is not None: logger.warning( "Could not find image processor class in the image processor config or the model config. Loading" " based on pattern matching with the model's feature extractor configuration." ) lowerCamelCase_ = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" ) if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): lowerCamelCase_ = config_dict["auto_map"]["AutoFeatureExtractor"] lowerCamelCase_ = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" ) logger.warning( "Could not find image processor auto map in the image processor config or the model config." 
" Loading based on pattern matching with the model's feature extractor configuration." ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = AutoConfig.from_pretrained(UpperCamelCase , **UpperCamelCase ) # It could be in `config.image_processor_type`` lowerCamelCase_ = getattr(UpperCamelCase , "image_processor_type" , UpperCamelCase ) if hasattr(UpperCamelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map: lowerCamelCase_ = config.auto_map["AutoImageProcessor"] if image_processor_class is not None: lowerCamelCase_ = image_processor_class_from_name(UpperCamelCase ) lowerCamelCase_ = image_processor_auto_map is not None lowerCamelCase_ = image_processor_class is not None or type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING lowerCamelCase_ = resolve_trust_remote_code( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) if has_remote_code and trust_remote_code: lowerCamelCase_ = get_class_from_dynamic_module( UpperCamelCase , UpperCamelCase , **UpperCamelCase ) lowerCamelCase_ = kwargs.pop("code_revision" , UpperCamelCase ) if os.path.isdir(UpperCamelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING: lowerCamelCase_ = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase )] return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase ) raise ValueError( f'''Unrecognized image processor in {pretrained_model_name_or_path}. 
Should have a ''' f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def snake_case ( UpperCamelCase , UpperCamelCase ): """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase , UpperCamelCase )
675
'''simple docstring''' from ..utils import DummyObject, requires_backends class snake_case ( metaclass=lowercase ): """simple docstring""" _lowerCamelCase = ["onnx"] def __init__( self , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(self , ["onnx"] ) @classmethod def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(cls , ["onnx"] ) @classmethod def snake_case ( cls , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" requires_backends(cls , ["onnx"] )
675
1
'''Lazy import structure for the Pix2Struct model package.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Maps submodule name -> public names it exports. Consumed by `_LazyModule`
# below so heavy submodules are imported only on first attribute access.
# Fix: this dict was bound to a throwaway name while `_LazyModule` referenced
# the undefined `_import_structure`, a guaranteed NameError at import time.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# The image processor is only exported when the vision backend is available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: was assigned to a dead variable, so the export never registered.
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

# The modeling code is only exported when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; these must mirror
    # `_import_structure` exactly (fixed: the module/class names previously
    # disagreed with the structure dict above).
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Fix: the lazy proxy was bound to a throwaway variable, so the lazy
    # attributes were never exposed; it must replace this module in
    # sys.modules for attribute access to be intercepted.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
675
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class snake_case : """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=2 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , UpperCamelCase=1000 , ): """simple docstring""" lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope 
lowerCamelCase_ = range_bbox def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ = bbox[i, j, 3] lowerCamelCase_ = bbox[i, j, 1] lowerCamelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ = bbox[i, j, 2] lowerCamelCase_ = bbox[i, j, 0] lowerCamelCase_ = t lowerCamelCase_ = tf.convert_to_tensor(UpperCamelCase ) lowerCamelCase_ = None if self.use_input_mask: lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , token_type_ids=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForMaskedLM(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForSequenceClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFLayoutLMForTokenClassification(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase 
, token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = TFLayoutLMForQuestionAnswering(config=UpperCamelCase ) lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class snake_case ( lowercase , lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _lowerCamelCase = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase = False _lowerCamelCase = True _lowerCamelCase = 10 def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) 
def snake_case ( self ): """simple docstring""" self.config_tester.run_common_tests() def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFLayoutLMModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def snake_case ( self ): """simple docstring""" pass def __snake_case ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowerCamelCase_ = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowerCamelCase_ = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowerCamelCase_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) lowerCamelCase_ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the sequence output on [0, :3, :3] lowerCamelCase_ = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-3 ) ) # test the pooled output on [1, :3] lowerCamelCase_ = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase , atol=1e-3 ) ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized sequence classification head lowerCamelCase_ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=2 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar lowerCamelCase_ = outputs.loss lowerCamelCase_ = (2,) self.assertEqual(loss.shape , UpperCamelCase ) # test the shape 
of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = (2, 2) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased" , num_labels=13 ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model( input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = outputs.logits lowerCamelCase_ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , UpperCamelCase ) @slow def snake_case ( self ): """simple docstring""" # initialize model with randomly initialized token classification head lowerCamelCase_ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = prepare_layoutlm_batch_inputs() # forward pass lowerCamelCase_ = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) # test the shape of the logits lowerCamelCase_ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , UpperCamelCase ) self.assertEqual(outputs.end_logits.shape , UpperCamelCase )
675
1
'''Configuration for TAPAS (table parsing) models.'''
from ...configuration_utils import PretrainedConfig

# Map of released TAPAS checkpoints to their hosted configuration files.
a_ = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


# Fix: the base class was the undefined name `lowercase` while the imported
# `PretrainedConfig` was never used; a config class must extend it.
class snake_case(PretrainedConfig):
    """Stores the configuration of a TAPAS model.

    Holds the BERT-style encoder hyperparameters plus the TAPAS-specific
    fine-tuning (cell selection / aggregation) hyperparameters.
    """

    # NOTE(review): upstream configs expose this value as `model_type`;
    # the attribute name here is kept as found — confirm against callers.
    _lowerCamelCase = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        """Build the configuration.

        Fixes applied to the mangled original:
        - the signature repeated one parameter name ~37 times (a duplicate-
          argument SyntaxError); the real names are recovered from the
          right-hand sides of the body's assignments, which referenced them;
        - every assignment targeted a single throwaway local instead of
          `self.<name>` (the trailing `isinstance(self.aggregation_labels, …)`
          check shows the attributes must live on `self`).
        The parameter list is otherwise unchanged (same order and defaults),
        so positional and keyword callers are unaffected.
        """
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        # NOTE: the list default is shared across instances by reference; kept
        # because serialization round-trips expect a plain list here.
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON deserialization yields string keys; normalize them back to int
        # (the `.items()` dict comprehension below is only valid for a dict).
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
675
'''Check (and optionally fix) the sorting of docs/source/en/_toctree.yml.'''
import argparse
from collections import defaultdict

import yaml

# Fix: the path was bound to a throwaway name and later clobbered; the open()
# calls below need a stable constant.
PATH_TO_TOC = "docs/source/en/_toctree.yml"


# Fix: all three functions below were defined under one colliding placeholder
# name while their call sites use `clean_doc_toc`, `check_scheduler_doc` and
# `check_pipeline_doc` — restoring the real names removes the NameErrors.
# The collapsed single local variable is also split back into distinct locals.
def clean_doc_toc(doc_list):
    """Clean one TOC section: dedupe entries by `local` key, sort by title,
    and keep the "Overview" page first. Raises ValueError on conflicting
    titles for the same `local` or on multiple overview docs."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Verify the "Schedulers" API section is clean/sorted; rewrite the TOC
    in place when `overwrite` is True, otherwise raise on a mismatch."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Verify the "Pipelines" API section (and each sub-section) is
    clean/sorted; rewrite the TOC when `overwrite` is True, else raise."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
675
1
'''simple docstring''' import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class snake_case ( lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = BertJapaneseTokenizer _lowerCamelCase = False _lowerCamelCase = True def snake_case ( self ): """simple docstring""" super().setUp() lowerCamelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは", "世界", "##世界", "、", "##、", "。", "##。", ] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "こんにちは、世界。 \nこんばんは、世界。" lowerCamelCase_ = "こんにちは 、 世界 。 こんばんは 、 世界 。" return input_text, output_text def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(UpperCamelCase ) lowerCamelCase_ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowerCamelCase_ = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) return text, ids def snake_case ( self ): """simple docstring""" pass # TODO add if relevant def snake_case ( self ): """simple docstring""" pass # TODO add if relevant def snake_case ( self ): """simple docstring""" pass # TODO add if relevant def snake_case ( self ): """simple docstring""" lowerCamelCase_ = 
self.tokenizer_class(self.vocab_file ) lowerCamelCase_ = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" ) self.assertListEqual(UpperCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" ) self.assertIsNotNone(UpperCamelCase ) lowerCamelCase_ = "こんにちは、世界。\nこんばんは、世界。" lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" ) with open(UpperCamelCase , "wb" ) as handle: pickle.dump(UpperCamelCase , UpperCamelCase ) with open(UpperCamelCase , "rb" ) as handle: lowerCamelCase_ = pickle.load(UpperCamelCase ) lowerCamelCase_ = tokenizer_new.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = MecabTokenizer(mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def snake_case ( self ): """simple docstring""" try: lowerCamelCase_ = MecabTokenizer(mecab_dic="unidic_lite" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def snake_case ( self ): """simple docstring""" try: lowerCamelCase_ = MecabTokenizer(mecab_dic="unidic" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def snake_case ( 
self ): """simple docstring""" lowerCamelCase_ = MecabTokenizer(do_lower_case=UpperCamelCase , mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) def snake_case ( self ): """simple docstring""" try: lowerCamelCase_ = MecabTokenizer( do_lower_case=UpperCamelCase , normalize_text=UpperCamelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = MecabTokenizer(normalize_text=UpperCamelCase , mecab_dic="ipadic" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" ) self.assertIsNotNone(UpperCamelCase ) lowerCamelCase_ = "こんにちは、世界。\nこんばんは、世界。" lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" ) with open(UpperCamelCase , "wb" ) as handle: pickle.dump(UpperCamelCase , UpperCamelCase ) with open(UpperCamelCase , "rb" ) as handle: lowerCamelCase_ = pickle.load(UpperCamelCase ) lowerCamelCase_ = tokenizer_new.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" ) 
self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" ) self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = SudachiTokenizer(do_lower_case=UpperCamelCase , sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = SudachiTokenizer(normalize_text=UpperCamelCase , sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , ) @require_sudachi def snake_case ( self ): """simple docstring""" lowerCamelCase_ = SudachiTokenizer(trim_whitespace=UpperCamelCase , sudachi_dict_type="core" ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , ) @require_jumanpp def snake_case ( self ): """simple docstring""" lowerCamelCase_ = 
self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" ) self.assertIsNotNone(UpperCamelCase ) lowerCamelCase_ = "こんにちは、世界。\nこんばんは、世界。" lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" ) with open(UpperCamelCase , "wb" ) as handle: pickle.dump(UpperCamelCase , UpperCamelCase ) with open(UpperCamelCase , "rb" ) as handle: lowerCamelCase_ = pickle.load(UpperCamelCase ) lowerCamelCase_ = tokenizer_new.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_jumanpp def snake_case ( self ): """simple docstring""" lowerCamelCase_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , ) @require_jumanpp def snake_case ( self ): """simple docstring""" lowerCamelCase_ = JumanppTokenizer(do_lower_case=UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , ) @require_jumanpp def snake_case ( self ): """simple docstring""" lowerCamelCase_ = JumanppTokenizer(normalize_text=UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , ) @require_jumanpp def snake_case ( self ): """simple docstring""" lowerCamelCase_ = JumanppTokenizer(trim_whitespace=UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", 
"発売", "さ", "れた", "。"] , ) @require_jumanpp def snake_case ( self ): """simple docstring""" lowerCamelCase_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] lowerCamelCase_ = {} for i, token in enumerate(UpperCamelCase ): lowerCamelCase_ = i lowerCamelCase_ = WordpieceTokenizer(vocab=UpperCamelCase , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] ) self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] ) self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" ) lowerCamelCase_ = tokenizer.subword_tokenizer lowerCamelCase_ = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" ) self.assertListEqual(UpperCamelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] ) lowerCamelCase_ = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" ) self.assertListEqual(UpperCamelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" ) lowerCamelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase ) lowerCamelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] 
assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case ( lowercase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = BertJapaneseTokenizer _lowerCamelCase = False def snake_case ( self ): """simple docstring""" super().setUp() lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def snake_case ( self , **UpperCamelCase ): """simple docstring""" return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **UpperCamelCase ) def snake_case ( self , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = "こんにちは、世界。 \nこんばんは、世界。" lowerCamelCase_ = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。" return input_text, output_text def snake_case ( self ): """simple docstring""" pass # TODO add if relevant def snake_case ( self ): """simple docstring""" pass # TODO add if relevant def snake_case ( self ): """simple docstring""" pass # TODO add if relevant def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" ) lowerCamelCase_ = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" ) self.assertListEqual( UpperCamelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"] lowerCamelCase_ = {} for i, token in enumerate(UpperCamelCase ): lowerCamelCase_ = i lowerCamelCase_ = CharacterTokenizer(vocab=UpperCamelCase , unk_token="[UNK]" ) 
self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] ) self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" ) lowerCamelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase ) lowerCamelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "cl-tohoku/bert-base-japanese" lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) class snake_case ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "cl-tohoku/bert-base-japanese" with self.assertLogs("transformers" , level="WARNING" ) as cm: BertTokenizer.from_pretrained(UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) ) lowerCamelCase_ = "bert-base-cased" with self.assertLogs("transformers" , level="WARNING" ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( "The tokenizer class you load from this checkpoint is not the same type as the class this function" " is called from." ) )
675
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer

from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Compute per-example token lengths for the train and val splits and pickle them.

    Args:
        tokenizer_name: model id or path passed to ``AutoTokenizer.from_pretrained``.
        data_dir: dataset directory understood by ``SeqaSeqDataset``.
        max_source_length: source truncation length forwarded to the dataset.
        max_target_length: target truncation length forwarded to the dataset.
        consider_target: when True, store ``max(src_len, tgt_len)`` per example
            instead of the source length only.
        **kwargs: extra keyword arguments forwarded to ``SeqaSeqDataset``.

    Side effects:
        Writes ``train_ds.len_file`` and ``val_ds.len_file`` via ``pickle_save``.

    NOTE(review): the original module ended with ``fire.Fire(save_len_file)`` while
    the function itself carried a generated name, which raised ``NameError`` when
    the script was executed; restoring the referenced name is the fix.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # Iterate the dataset once in order (shuffle=False keeps lengths aligned
        # with example order); tqdm labels the bar with the output len_file path.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # Count non-pad tokens per row to recover each example's true length.
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
675
1
'''simple docstring'''
import os


def solution(filename: str = "matrix.txt") -> int:
    """Project Euler 81: minimal path sum from the top-left to the bottom-right
    of a grid, moving only right and down.

    Args:
        filename: comma-separated grid file; resolved relative to the directory
            component of *filename* (i.e. the current working directory when a
            bare file name is given).

    Returns:
        The minimal path sum as an int.
    """
    with open(os.path.join(os.path.dirname(filename), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    rows = len(grid)
    cols = len(grid[0])

    # dp[i][j] = cheapest cost of reaching cell (i, j).  Sized rows x cols so
    # rectangular grids work too (the previous version assumed a square grid
    # and also built an unused second table).
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    for j in range(1, cols):
        dp[0][j] = grid[0][j] + dp[0][j - 1]  # first row: only reachable from the left
    for i in range(1, rows):
        dp[i][0] = grid[i][0] + dp[i - 1][0]  # first column: only reachable from above
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


# Backward-compatible alias for the previous generated name.
__snake_case = solution

if __name__ == "__main__":
    print(f"{solution() = }")
675
'''simple docstring'''


def remove_duplicates(key: str) -> str:
    """Return *key* with repeated letters removed (first occurrence kept).

    Spaces are always kept (even repeated ones, due to the short-circuit in the
    condition below); non-alphabetic characters other than spaces are dropped.
    """
    key_no_dups = ""
    for ch in key:
        # Precedence is `ch == " " or (ch not in key_no_dups and ch.isalpha())`.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a monoalphabetic substitution map for the keyword cipher.

    The deduplicated, upper-cased key fills the first positions of the cipher
    alphabet; the remaining letters follow in alphabetical order, skipping any
    letter already consumed by the key so the mapping stays a bijection.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encode *message* (case-insensitive) with *cipher_map*; unmapped characters pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decode *message* by inverting *cipher_map*."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive entry point: prompt for a message, a keyword, and a mode.

    NOTE(review): the previous version gave every function the same generated
    name, so the dispatch dict below raised ``NameError``; restoring the names
    it references is the fix.
    """
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
675
1
'''simple docstring''' from __future__ import annotations from math import pow, sqrt def __snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ): if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance == 0: return {"resistance": sqrt(pow(UpperCAmelCase_ , 2 ) - pow(UpperCAmelCase_ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(UpperCAmelCase_ , 2 ) - pow(UpperCAmelCase_ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(UpperCAmelCase_ , 2 ) + pow(UpperCAmelCase_ , 2 ) )} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
675
'''simple docstring'''
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): identifiers here (`snake_case`, `lowercase`, `_lowerCamelCase`,
# `UpperCamelCase`, `lowerCamelCase_`) look machine-generated.  Every method and
# both classes share the same name, so later definitions shadow earlier ones in
# the enclosing namespace — distinct names should be restored before relying on
# these tests actually running.
@require_tokenizers
class snake_case ( lowercase , unittest.TestCase ):
    """simple docstring"""

    # Tokenizer classes / flags consumed by the tester mixin.  Because all four
    # bindings reuse the same attribute name, only the last assignment survives.
    _lowerCamelCase = OpenAIGPTTokenizer
    _lowerCamelCase = OpenAIGPTTokenizerFast
    _lowerCamelCase = True
    _lowerCamelCase = False

    def snake_case ( self ):
        """Write a tiny BPE vocab (as JSON) and merges file into the test tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase_ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        # token -> id mapping in declaration order.
        lowerCamelCase_ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
        lowerCamelCase_ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(UpperCamelCase ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(UpperCamelCase ) )

    def snake_case ( self , UpperCamelCase ):
        """Return an (input_text, output_text) pair the mixin uses for round-trip checks."""
        return "lower newer", "lower newer"

    def snake_case ( self ):
        """Tokenize 'lower' with the slow tokenizer and check tokens and ids."""
        lowerCamelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        lowerCamelCase_ = "lower"
        lowerCamelCase_ = ["low", "er</w>"]
        lowerCamelCase_ = tokenizer.tokenize(UpperCamelCase )
        self.assertListEqual(UpperCamelCase , UpperCamelCase )

        # Append an unknown token and verify the expected id sequence.
        lowerCamelCase_ = tokens + ["<unk>"]
        lowerCamelCase_ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )

    def snake_case ( self , UpperCamelCase=15 ):
        """Padding without a pad token must raise for the fast tokenizer (encode,
        encode_plus and batch_encode_plus, for simple and pair inputs alike)."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )

                # Simple input
                lowerCamelCase_ = "This is a simple input"
                lowerCamelCase_ = ["This is a simple input 1", "This is a simple input 2"]
                lowerCamelCase_ = ("This is a simple input", "This is a pair")
                lowerCamelCase_ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Simple input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" )

                # Pair input
                self.assertRaises(
                    UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding="max_length" , )

    def snake_case ( self ):
        """Intentionally empty: overrides a mixin test that does not apply here."""
        pass


# Placeholder variant gated on optional dependencies (ftfy + spacy); presumably
# exists so the tester mixin runs under that configuration — no extra tests.
@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( lowercase ):
    """simple docstring"""

    pass
675
1