Dataset schema (as reported by the dataset viewer; string columns show their observed length range, integer columns their value range):

  code                     string   length 87 to 55.2k
  code_codestyle           int64    0 to 349
  style_context            string   length 135 to 49.1k
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1

Each example below is listed field by field in this order.
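For orientation, here is a minimal sketch of how rows with this schema could be loaded and inspected with the `datasets` library; the repository id "user/code-style-pairs" is a placeholder for illustration, not the dataset's actual name:

from datasets import load_dataset

# Placeholder repository id -- substitute the real dataset name.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]))                # Python source string (length 87 to 55.2k)
print(row["code_codestyle"])           # integer style id in [0, 349]
print(len(row["style_context"]))       # second source string (length 135 to 49.1k)
print(row["style_context_codestyle"])  # integer style id in [0, 349]
print(row["label"])                    # binary label (0 or 1)

code: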
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def __a ( __lowerCamelCase, __lowerCamelCase=False ): UpperCAmelCase_ : Union[str, Any] = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") ) 
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") ) rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCAmelCase_ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: UpperCAmelCase_ : int = "" else: UpperCAmelCase_ : Any = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCAmelCase_ : str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) UpperCAmelCase_ : Optional[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ : Optional[int] = in_proj_weight[ : config.hidden_size, : ] UpperCAmelCase_ : List[Any] = in_proj_bias[: config.hidden_size] UpperCAmelCase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCAmelCase_ : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCAmelCase_ : Tuple = in_proj_weight[ -config.hidden_size :, : ] UpperCAmelCase_ : Union[str, Any] = in_proj_bias[-config.hidden_size :] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[Any] = dct.pop(__lowerCamelCase ) UpperCAmelCase_ : List[str] = val def __a ( ): UpperCAmelCase_ : str = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ): UpperCAmelCase_ : int = BitConfig( global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=__lowerCamelCase, ) UpperCAmelCase_ : List[str] = ViTHybridConfig(backbone_config=__lowerCamelCase, image_size=384, num_labels=1000 ) UpperCAmelCase_ : int = False # load original model from timm UpperCAmelCase_ : List[str] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCAmelCase_ : Any = timm_model.state_dict() if base_model: remove_classification_head_(__lowerCamelCase ) UpperCAmelCase_ : Any = create_rename_keys(__lowerCamelCase, __lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) UpperCAmelCase_ : Tuple = "huggingface/label-files" UpperCAmelCase_ : List[Any] = "imagenet-1k-id2label.json" UpperCAmelCase_ : Optional[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) ) UpperCAmelCase_ : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase_ : Optional[Any] = idalabel UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": UpperCAmelCase_ : int = ViTHybridModel(__lowerCamelCase ).eval() else: UpperCAmelCase_ : int = ViTHybridForImageClassification(__lowerCamelCase ).eval() model.load_state_dict(__lowerCamelCase ) # create image processor UpperCAmelCase_ : str = create_transform(**resolve_data_config({}, model=__lowerCamelCase ) ) UpperCAmelCase_ : Any = transform.transforms UpperCAmelCase_ : Optional[Any] = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } UpperCAmelCase_ : List[str] = ViTHybridImageProcessor( do_resize=__lowerCamelCase, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=__lowerCamelCase, 
crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=__lowerCamelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), ) UpperCAmelCase_ : Any = prepare_img() UpperCAmelCase_ : Optional[int] = transform(__lowerCamelCase ).unsqueeze(0 ) UpperCAmelCase_ : int = processor(__lowerCamelCase, return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowerCamelCase, __lowerCamelCase ) # verify logits with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(__lowerCamelCase ) UpperCAmelCase_ : Any = outputs.logits print("Predicted class:", logits.argmax(-1 ).item() ) if base_model: UpperCAmelCase_ : int = timm_model.forward_features(__lowerCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowerCamelCase, outputs.pooler_output, atol=1E-3 ) else: UpperCAmelCase_ : Dict = timm_model(__lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print(f"""Pushing model and processor to the hub {vit_name}""" ) model.push_to_hub(f"""ybelkada/{vit_name}""" ) processor.push_to_hub(f"""ybelkada/{vit_name}""" ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_r50_s16_384', type=str, help='Name of the hybrid ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) _a = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 61

style_context:
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowercase__ ( __lowercase : Any ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def lowercase__ ( __lowercase : Tuple ) -> int: """simple docstring""" __UpperCamelCase , __UpperCamelCase = emb.weight.shape __UpperCamelCase = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) __UpperCamelCase = emb.weight.data return lin_layer def lowercase__ ( __lowercase : int , __lowercase : List[str]="facebook/mbart-large-en-ro" , __lowercase : str=False , __lowercase : List[Any]=False ) -> int: """simple docstring""" __UpperCamelCase = torch.load(__lowercase , map_location='cpu' )['model'] remove_ignore_keys_(__lowercase ) __UpperCamelCase = state_dict['encoder.embed_tokens.weight'].shape[0] __UpperCamelCase = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase ) if mbart_aa and finetuned: __UpperCamelCase = 'relu' __UpperCamelCase = state_dict['decoder.embed_tokens.weight'] __UpperCamelCase = MBartForConditionalGeneration(__lowercase ) model.model.load_state_dict(__lowercase ) if finetuned: __UpperCamelCase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a__ : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') a__ : Union[str, Any] =parser.parse_args() a__ : str =convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
style_context_codestyle: 53
label: 0

code:
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    __UpperCamelCase = FileLock(str(tmpdir / 'foo.lock' ) )
    __UpperCamelCase = FileLock(str(tmpdir / 'foo.lock' ) )
    __UpperCamelCase = 0.01
    with locka.acquire():
        with pytest.raises(SCREAMING_SNAKE_CASE__ ):
            __UpperCamelCase = time.time()
            locka.acquire(SCREAMING_SNAKE_CASE__ )
    assert time.time() - _start > timeout


def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
    __UpperCamelCase = 'a' * 10_00 + '.lock'
    __UpperCamelCase = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('.lock' )
    assert not locka._lock_file.endswith(SCREAMING_SNAKE_CASE__ )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_55
    __UpperCamelCase = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(SCREAMING_SNAKE_CASE__ ):
            locka.acquire(0 )
code_codestyle: 62

style_context:
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : Any , __A : Dict , __A : str , __A : List[Any]=1_0_2_4 , __A : Tuple=1_0_2_4 , __A : str=3.6 ): __UpperCamelCase = tokenizer __UpperCamelCase = tokenizer.bos_token_id __UpperCamelCase = dataset __UpperCamelCase = seq_length __UpperCamelCase = seq_length * chars_per_token * num_of_sequences def __iter__( self : Any ): __UpperCamelCase = iter(self.dataset ) __UpperCamelCase = True while more_examples: __UpperCamelCase , __UpperCamelCase = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(__A )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: __UpperCamelCase = False break __UpperCamelCase = tokenizer(__A , truncation=__A )['input_ids'] __UpperCamelCase = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(__A ) , self.seq_length ): __UpperCamelCase = all_token_ids[i : i + self.seq_length] if len(__A ) == self.seq_length: yield torch.tensor(__A ) def lowercase__ ( __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = {'streaming': True} __UpperCamelCase = load_dataset(args.dataset_name , split='train' , **__lowercase ) __UpperCamelCase = ConstantLengthDataset(__lowercase , __lowercase , seq_length=args.seq_length ) __UpperCamelCase = DataLoader(__lowercase , batch_size=args.batch_size ) return eval_dataloader def lowercase__ ( __lowercase : Tuple ) -> Optional[Any]: """simple docstring""" model.eval() __UpperCamelCase = [] for step, batch in enumerate(__lowercase ): with torch.no_grad(): __UpperCamelCase = model(__lowercase , labels=__lowercase ) __UpperCamelCase = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__lowercase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __UpperCamelCase = torch.mean(torch.cat(__lowercase ) ) try: __UpperCamelCase = torch.exp(__lowercase ) except OverflowError: __UpperCamelCase = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator a__ : int =Accelerator() # Parse configuration a__ : Dict =HfArgumentParser(EvaluationArguments) a__ : Union[str, Any] =parser.parse_args() set_seed(args.seed) # Logging a__ : List[Any] =logging.getLogger(__name__) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) # Load model and tokenizer a__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt) a__ : List[Any] =AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader a__ : Union[str, Any] =create_dataloader(args) # Prepare everything with our `accelerator`. a__ , a__ : List[str] =accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('''Evaluating and saving model after training''') a__ , a__ : Any =evaluate(args) logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
style_context_codestyle: 53
label: 0

code:
'''simple docstring'''
from __future__ import annotations


def _lowerCamelCase ( lowercase : float , lowercase : float , lowercase : float , ) -> tuple[str, float]:
    if (stress, tangential_force, area).count(0 ) != 1:
        raise ValueError("You cannot supply more or less than 2 values" )
    elif stress < 0:
        raise ValueError("Stress cannot be negative" )
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative" )
    elif area < 0:
        raise ValueError("Area cannot be negative" )
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 63

style_context:
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a__ : Any =logging.get_logger(__name__) a__ : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo" SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"] SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ): __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = hidden_size __UpperCamelCase = num_layers __UpperCamelCase = num_heads __UpperCamelCase = intermediate_size __UpperCamelCase = window_size __UpperCamelCase = activation_function __UpperCamelCase = resid_dropout __UpperCamelCase = embed_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = classifier_dropout __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id __UpperCamelCase = attention_types __UpperCamelCase = self.expand_attention_types_params(__A ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=__A , eos_token_id=__A , **__A ) @staticmethod def _lowerCamelCase ( __A : Tuple ): __UpperCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any: """simple docstring""" import torch __UpperCamelCase = input.size() __UpperCamelCase = len(__lowercase ) __UpperCamelCase = shape[dimension] __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase ) __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1 __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None] __UpperCamelCase = [slice(__lowercase )] * rank __UpperCamelCase = indices __UpperCamelCase = input[s] __UpperCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" import torch __UpperCamelCase = torch.arange(1 , __lowercase ) __UpperCamelCase = torch.remainder(__lowercase , __lowercase ) __UpperCamelCase = remainders == 0 __UpperCamelCase = candidates[divisor_indices] __UpperCamelCase = torch.max(__lowercase ) return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' ) class snake_case ( __lowerCamelCase ): """simple docstring""" @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def _lowerCamelCase ( self : int ): return self._config.num_heads def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ): __UpperCamelCase = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs['attention_mask'] if self.use_past: __UpperCamelCase = ordered_inputs['attention_mask'].dtype __UpperCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def _lowerCamelCase ( self : Dict ): return 1_3
style_context_codestyle: 53
label: 0

code:
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer A_ = logging.get_logger(__name__) A_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A_ = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( 
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } A_ = { '''bert-base-uncased''': 5_12, '''bert-large-uncased''': 5_12, '''bert-base-cased''': 5_12, '''bert-large-cased''': 5_12, '''bert-base-multilingual-uncased''': 5_12, '''bert-base-multilingual-cased''': 5_12, '''bert-base-chinese''': 5_12, '''bert-base-german-cased''': 5_12, '''bert-large-uncased-whole-word-masking''': 5_12, '''bert-large-cased-whole-word-masking''': 5_12, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12, '''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12, '''bert-base-cased-finetuned-mrpc''': 5_12, '''bert-base-german-dbmdz-cased''': 5_12, '''bert-base-german-dbmdz-uncased''': 5_12, '''TurkuNLP/bert-base-finnish-cased-v1''': 5_12, '''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12, '''wietsedv/bert-base-dutch-cased''': 5_12, } A_ = { '''bert-base-uncased''': {'''do_lower_case''': True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False}, } class lowercase( __a ): '''simple docstring''' lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = 
PRETRAINED_INIT_CONFIGURATION lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = BertTokenizer def __init__( self: int, a_: Any=None, a_: Tuple=None, a_: Tuple=True, a_: Union[str, Any]="[UNK]", a_: List[Any]="[SEP]", a_: Union[str, Any]="[PAD]", a_: List[str]="[CLS]", a_: Union[str, Any]="[MASK]", a_: List[str]=True, a_: List[str]=None, **a_: List[Any], ): '''simple docstring''' super().__init__( a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, ) _snake_case : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""", a_ ) != do_lower_case or normalizer_state.get("""strip_accents""", a_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars ): _snake_case : Optional[int] = getattr(a_, normalizer_state.pop("""type""" ) ) _snake_case : Optional[Any] = do_lower_case _snake_case : Optional[int] = strip_accents _snake_case : int = tokenize_chinese_chars _snake_case : Union[str, Any] = normalizer_class(**a_ ) _snake_case : str = do_lower_case def UpperCamelCase_ ( self: Dict, a_: Dict, a_: str=None ): '''simple docstring''' _snake_case : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self: Any, a_: List[int], a_: Optional[List[int]] = None ): '''simple docstring''' _snake_case : Dict = [self.sep_token_id] _snake_case : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self: Union[str, Any], a_: str, a_: Optional[str] = None ): '''simple docstring''' _snake_case : List[Any] = self._tokenizer.model.save(a_, name=a_ ) return tuple(a_ )
code_codestyle: 64

style_context:
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa" SCREAMING_SNAKE_CASE_ : Dict =( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) SCREAMING_SNAKE_CASE_ : List[str] ="document_qa" SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"] SCREAMING_SNAKE_CASE_ : Any =["text"] def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ): if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*__A , **__A ) def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ): __UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' __UpperCamelCase = task_prompt.replace('{user_input}' , __A ) __UpperCamelCase = self.pre_processor.tokenizer( __A , add_special_tokens=__A , return_tensors='pt' ).input_ids __UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ): return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences def _lowerCamelCase ( self : Tuple , __A : List[Any] ): __UpperCamelCase = self.pre_processor.batch_decode(__A )[0] __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) __UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip() # remove first task start token __UpperCamelCase = self.pre_processor.tokenajson(__A ) return sequence["answer"]
style_context_codestyle: 53
label: 0

code:
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class A ( UpperCAmelCase_ ): def lowercase_ (self : Any ) -> List[str]: """simple docstring""" UpperCAmelCase__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , "tf_padding" ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , "depth_multiplier" ) ) class A : def __init__(self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any=1_3 , __UpperCAmelCase : str=3 , __UpperCAmelCase : List[Any]=3_2 , __UpperCAmelCase : List[str]=0.25 , __UpperCAmelCase : Any=8 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[int]=1_0_2_4 , __UpperCAmelCase : Optional[int]=3_2 , __UpperCAmelCase : Tuple="relu6" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Tuple=1_0 , __UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = image_size UpperCAmelCase__ = depth_multiplier UpperCAmelCase__ = min_depth UpperCAmelCase__ = tf_padding UpperCAmelCase__ = int(last_hidden_size * depth_multiplier ) UpperCAmelCase__ = output_stride UpperCAmelCase__ = hidden_act UpperCAmelCase__ = classifier_dropout_prob UpperCAmelCase__ = use_labels UpperCAmelCase__ = is_training UpperCAmelCase__ = num_labels UpperCAmelCase__ = initializer_range UpperCAmelCase__ = scope def lowercase_ (self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ = None UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase_ (self : Dict ) -> str: """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str ) -> Tuple: """simple docstring""" UpperCAmelCase__ = MobileNetVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, 
self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase_ (self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict ) -> int: """simple docstring""" UpperCAmelCase__ = self.num_labels UpperCAmelCase__ = MobileNetVaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() UpperCAmelCase__ = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ (self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs UpperCAmelCase__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): __UpperCAmelCase : Any = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () __UpperCAmelCase : str = ( {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification} if is_torch_available() else {} ) __UpperCAmelCase : Optional[int] = False __UpperCAmelCase : Any = False __UpperCAmelCase : int = False __UpperCAmelCase : Tuple = False def lowercase_ (self : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = MobileNetVaModelTester(self ) UpperCAmelCase__ = MobileNetVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def lowercase_ (self : str ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" ) def lowercase_ (self : List[Any] ) -> List[str]: """simple docstring""" pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" ) def lowercase_ (self : List[str] ) -> str: """simple docstring""" pass @unittest.skip(reason="MobileNetV1 does not output attentions" ) def lowercase_ (self : Any ) -> Union[str, Any]: """simple docstring""" pass def lowercase_ (self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(__UpperCAmelCase ) UpperCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ = [*signature.parameters.keys()] UpperCAmelCase__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowercase_ (self : Union[str, Any] ) -> Any: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowercase_ (self : List[Any] ) -> Optional[int]: """simple docstring""" def check_hidden_states_output(__UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): UpperCAmelCase__ = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): UpperCAmelCase__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) UpperCAmelCase__ = outputs.hidden_states UpperCAmelCase__ = 2_6 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = True 
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowercase_ (self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def lowercase_ (self : List[str] ) -> Any: """simple docstring""" for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = MobileNetVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def lowercase_ (self : Optional[int] ) -> int: """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None ) @slow def lowercase_ (self : Optional[Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__UpperCAmelCase ) UpperCAmelCase__ = self.default_image_processor UpperCAmelCase__ = prepare_img() UpperCAmelCase__ = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): UpperCAmelCase__ = model(**__UpperCAmelCase ) # verify the logits UpperCAmelCase__ = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) UpperCAmelCase__ = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
code_codestyle: 65

style_context:
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
style_context_codestyle: 53
label: 0

code:
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __a = logging.get_logger(__name__) __a = { "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json", # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Optional[Any] = """mctct""" def __init__( self: Optional[Any] , snake_case: List[str]=8_065 , snake_case: Optional[Any]=1_536 , snake_case: str=36 , snake_case: Optional[int]=6_144 , snake_case: Any=4 , snake_case: Any=384 , snake_case: Optional[Any]=920 , snake_case: Dict=1E-5 , snake_case: Any=0.3 , snake_case: Optional[Any]="relu" , snake_case: Tuple=0.0_2 , snake_case: int=0.3 , snake_case: Dict=0.3 , snake_case: Optional[int]=1 , snake_case: Dict=0 , snake_case: Optional[Any]=2 , snake_case: str=1 , snake_case: Union[str, Any]=0.3 , snake_case: List[str]=1 , snake_case: Tuple=(7,) , snake_case: Optional[Any]=(3,) , snake_case: int=80 , snake_case: List[str]=1 , snake_case: Any=None , snake_case: Union[str, Any]="sum" , snake_case: Any=False , **snake_case: List[Any] , ) -> Dict: super().__init__(**snake_case , pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case ) snake_case_ :int = vocab_size snake_case_ :List[Any] = hidden_size snake_case_ :str = num_hidden_layers snake_case_ :Tuple = intermediate_size snake_case_ :Optional[Any] = num_attention_heads snake_case_ :Dict = attention_head_dim snake_case_ :Optional[int] = max_position_embeddings snake_case_ :str = layer_norm_eps snake_case_ :Tuple = layerdrop snake_case_ :Any = hidden_act snake_case_ :Optional[int] = initializer_range snake_case_ :List[str] = hidden_dropout_prob snake_case_ :int = attention_probs_dropout_prob snake_case_ :int = pad_token_id snake_case_ :Optional[int] = bos_token_id snake_case_ :int = eos_token_id snake_case_ :Tuple = conv_glu_dim snake_case_ :List[str] = conv_dropout snake_case_ :List[str] = num_conv_layers snake_case_ :int = input_feat_per_channel snake_case_ :List[str] = input_channels snake_case_ :Tuple = conv_channels snake_case_ :Dict = ctc_loss_reduction snake_case_ :Any = ctc_zero_infinity # prevents config testing fail with exporting to json snake_case_ :Any = list(snake_case ) snake_case_ :str = list(snake_case ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """ f"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """ f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
code_codestyle: 66

style_context:
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase__ ( __lowercase : Features ) -> Optional[int]: """simple docstring""" __UpperCamelCase = np.inf def set_batch_size(__lowercase : FeatureType ) -> None: nonlocal batch_size if isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary": __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__lowercase , __lowercase ) return None if batch_size is np.inf else batch_size class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ): super().__init__( __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , ) __UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths} __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] __UpperCamelCase = Parquet( cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , ) def _lowerCamelCase ( self : Optional[int] ): # Build iterable dataset if self.streaming: __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , ) __UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__A , in_memory=self.keep_in_memory ) return dataset class snake_case : """simple docstring""" def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ): __UpperCamelCase = dataset __UpperCamelCase = path_or_buf __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features ) __UpperCamelCase = parquet_writer_kwargs def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: __UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs ) else: __UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs ) return written def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ): __UpperCamelCase = 0 __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A ) __UpperCamelCase = self.dataset.features.arrow_schema 
__UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): __UpperCamelCase = query_table( table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__A ) written += batch.nbytes writer.close() return written
style_context_codestyle: 53
label: 0

code:
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ={ "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class a__ ( UpperCAmelCase__ ): lowerCamelCase : Any ="umt5" lowerCamelCase : Dict =["past_key_values"] def __init__( self : Optional[Any] , a : Optional[Any]=25_01_12 , a : str=5_12 , a : Union[str, Any]=64 , a : Union[str, Any]=10_24 , a : Dict=8 , a : Any=None , a : str=6 , a : Optional[int]=32 , a : List[str]=1_28 , a : Optional[int]=0.1 , a : Any=1e-6 , a : List[Any]=1.0 , a : int="gated-gelu" , a : str=True , a : Dict=True , a : Optional[int]="T5Tokenizer" , a : List[Any]=True , a : Optional[Any]=0 , a : Any=1 , a : Union[str, Any]=0 , **a : str , ): """simple docstring""" super().__init__( is_encoder_decoder=a , tokenizer_class=a , tie_word_embeddings=a , pad_token_id=a , eos_token_id=a , decoder_start_token_id=a , **a , ) __lowerCamelCase = vocab_size __lowerCamelCase = d_model __lowerCamelCase = d_kv __lowerCamelCase = d_ff __lowerCamelCase = num_layers __lowerCamelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __lowerCamelCase = num_heads __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = relative_attention_max_distance __lowerCamelCase = dropout_rate __lowerCamelCase = layer_norm_epsilon __lowerCamelCase = initializer_factor __lowerCamelCase = feed_forward_proj __lowerCamelCase = use_cache __lowerCamelCase = self.feed_forward_proj.split('''-''' ) __lowerCamelCase = act_info[-1] __lowerCamelCase = act_info[0] == '''gated''' if len(a ) > 1 and act_info[0] != "gated" or len(a ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''' ) if feed_forward_proj == "gated-gelu": __lowerCamelCase = '''gelu_new''' @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" return self.d_model @property def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" return self.num_heads @property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" return self.num_layers class a__ ( UpperCAmelCase__ ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" __lowerCamelCase = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: __lowerCamelCase = '''past_encoder_sequence + sequence''' __lowerCamelCase = {0: '''batch'''} __lowerCamelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} __lowerCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(a , direction='''inputs''' ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" return 13 @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" return 5e-4
'''simple docstring'''
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    'split_dict',
    [
        SplitDict(),
        SplitDict({'train': SplitInfo(name='train', num_bytes=1337, num_examples=42, dataset_name='my_dataset')}),
        SplitDict({'train': SplitInfo(name='train', num_bytes=1337, num_examples=42)}),
        SplitDict({'train': SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """simple docstring"""
    split_dict_yaml_list = split_dict._to_yaml_list()
    # compare the YAML dump against the source dict, not against itself
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """simple docstring"""
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
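
# --- Added example: the YAML round-trip contract the first test above checks,
# as a tiny runnable sketch (assumes the `datasets` library is installed; the
# exact dict layout of the YAML list is an assumption).
from datasets.splits import SplitDict, SplitInfo

sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = sd._to_yaml_list()  # e.g. [{"name": "train", "num_bytes": 1337, "num_examples": 42}]
assert SplitDict._from_yaml_list(yaml_list)["train"].num_examples == 42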
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar everywhere except on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
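
# --- Added example: the same "render only on the main process" pattern without
# accelerate, keyed off a torchrun-style LOCAL_RANK variable (an assumption;
# accelerate itself resolves the rank through PartialState as above).
import os
from tqdm.auto import tqdm as _tqdm_auto


def rank_zero_tqdm(*args, main_process_only=True, **kwargs):
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    disable = main_process_only and local_rank != 0
    return _tqdm_auto(*args, **kwargs, disable=disable)


for _ in rank_zero_tqdm(range(3)):
    pass  # the bar renders only where LOCAL_RANK == 0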
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[str] ={ '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any =[ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys a__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
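
# --- Added example: a hedged sketch of the deferred-import behavior that
# _LazyModule gives the package above, using the module-level __getattr__ hook
# from PEP 562. This illustrates the pattern only; it is not transformers'
# actual machinery. Placed in a package __init__.py, the heavy submodule is
# imported the first time one of its exported names is accessed.
import importlib

_sketch_import_structure = {"modeling_bigbird_pegasus": ["BigBirdPegasusModel"]}


def __getattr__(name):
    for submodule, exported_names in _sketch_import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + submodule, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")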
"""simple docstring""" import argparse __UpperCamelCase = '''docs/source/_static/js/custom.js''' def UpperCAmelCase ( UpperCAmelCase ) -> int: with open(UpperCAmelCase , encoding='utf-8' , newline='\n' ) as f: snake_case_ = f.readlines() snake_case_ = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 snake_case_ = f'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += f' "v{version}": "v{version}",\n' with open(UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') __UpperCamelCase = parser.parse_args() update_custom_js(args.version)
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a__ : str =logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str =["input_features", "attention_mask"] def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ): super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A ) __UpperCamelCase = num_mel_bins __UpperCamelCase = do_ceptral_normalize __UpperCamelCase = normalize_means __UpperCamelCase = normalize_vars __UpperCamelCase = True def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ): __UpperCamelCase = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers __UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 ) __UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: __UpperCamelCase = x[:input_length].mean(axis=0 ) __UpperCamelCase = np.subtract(__A , __A ) if normalize_vars: __UpperCamelCase = x[:input_length].std(axis=0 ) __UpperCamelCase = np.divide(__A , __A ) if input_length < x.shape[0]: __UpperCamelCase = padding_value # make sure array is in float32 __UpperCamelCase = x.astype(np.floataa ) return x def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ): __UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__A , __A ) ] def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __UpperCamelCase = is_batched_numpy or ( isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__A , np.ndarray ): __UpperCamelCase = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCamelCase = [raw_speech] # extract fbank features __UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech] # convert into correct format for padding __UpperCamelCase = BatchFeature({'input_features': features} ) __UpperCamelCase = self.pad( __A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , ) # make sure list is in array format __UpperCamelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , __A ): __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features] __UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: __UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __UpperCamelCase = ( np.array(__A , dtype=np.intaa ) if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD else None ) __UpperCamelCase = self.normalize( padded_inputs['input_features'] , attention_mask=__A ) if return_tensors is not None: __UpperCamelCase = padded_inputs.convert_to_tensors(__A ) return padded_inputs
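
# --- Added example: utterance-level cepstral mean and variance normalization,
# the core of the `utterance_cmvn` staticmethod above, on a toy (frames, mels)
# matrix. Standalone sketch; shapes and values are illustrative.
import numpy as np

feats = np.random.RandomState(0).randn(100, 80).astype(np.float32)
normed = (feats - feats.mean(axis=0)) / feats.std(axis=0)
assert np.allclose(normed.mean(axis=0), 0.0, atol=1e-4)  # zero mean per mel bin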
'''simple docstring''' import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig A__ : List[str] ={ '''facebook/maskformer-swin-base-ade''': ( '''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json''' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } A__ : List[str] =logging.get_logger(__name__) class UpperCAmelCase ( snake_case_ ): _lowercase: Tuple = '''maskformer''' _lowercase: List[Any] = {'''hidden_size''': '''mask_feature_size'''} _lowercase: List[str] = ['''resnet''', '''swin'''] _lowercase: int = ['''detr'''] def __init__( self : List[str] , __snake_case : int = 2_56 , __snake_case : int = 2_56 , __snake_case : float = 0.1 , __snake_case : bool = False , __snake_case : Optional[Dict] = None , __snake_case : Optional[Dict] = None , __snake_case : float = 0.02 , __snake_case : float = 1.0 , __snake_case : float = 1.0 , __snake_case : float = 1.0 , __snake_case : float = 20.0 , __snake_case : Optional[bool] = None , **__snake_case : List[Any] , ) -> int: if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k _lowerCAmelCase = SwinConfig( image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) if isinstance(__snake_case , __snake_case ): _lowerCAmelCase = backbone_config.pop("""model_type""" ) _lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase = config_class.from_dict(__snake_case ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
" f"Supported model types: {','.join(self.backbones_supported )}" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 _lowerCAmelCase = DetrConfig() else: # verify that the decoder is supported _lowerCAmelCase = ( decoder_config.pop("""model_type""" ) if isinstance(__snake_case , __snake_case ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"Transformer Decoder {decoder_type} not supported, please use one of" f" {','.join(self.decoders_supported )}" ) if isinstance(__snake_case , __snake_case ): _lowerCAmelCase = CONFIG_MAPPING[decoder_type] _lowerCAmelCase = config_class.from_dict(__snake_case ) _lowerCAmelCase = backbone_config _lowerCAmelCase = decoder_config # main feature dimension for the model _lowerCAmelCase = fpn_feature_size _lowerCAmelCase = mask_feature_size # initializer _lowerCAmelCase = init_std _lowerCAmelCase = init_xavier_std # Hungarian matcher && loss _lowerCAmelCase = cross_entropy_weight _lowerCAmelCase = dice_weight _lowerCAmelCase = mask_weight _lowerCAmelCase = use_auxiliary_loss _lowerCAmelCase = no_object_weight _lowerCAmelCase = output_auxiliary_logits _lowerCAmelCase = self.decoder_config.encoder_attention_heads _lowerCAmelCase = self.decoder_config.num_hidden_layers super().__init__(**__snake_case ) @classmethod def lowercase__ ( cls : Optional[int] , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : int ) -> Union[str, Any]: return cls( backbone_config=__snake_case , decoder_config=__snake_case , **__snake_case , ) def lowercase__ ( self : Union[str, Any] ) -> Dict[str, any]: _lowerCAmelCase = copy.deepcopy(self.__dict__ ) _lowerCAmelCase = self.backbone_config.to_dict() _lowerCAmelCase = self.decoder_config.to_dict() _lowerCAmelCase = self.__class__.model_type return output
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : List[Any] =logging.get_logger(__name__) a__ : List[Any] ={ '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model" def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = project_dim class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model" def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ): super().__init__(**__A ) __UpperCamelCase = hidden_size __UpperCamelCase = intermediate_size __UpperCamelCase = projection_dim __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = num_channels __UpperCamelCase = patch_size __UpperCamelCase = image_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = attention_dropout __UpperCamelCase = layer_norm_eps __UpperCamelCase = hidden_act @classmethod def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type' ) == "altclip": __UpperCamelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] ="altclip" SCREAMING_SNAKE_CASE_ : Optional[int] =True def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). __UpperCamelCase = kwargs.pop('text_config_dict' , __A ) __UpperCamelCase = kwargs.pop('vision_config_dict' , __A ) super().__init__(**__A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __UpperCamelCase = {} # This is the complete result when using `text_config_dict`. __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __UpperCamelCase = {} # This is the complete result when using `vision_config_dict`. __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __UpperCamelCase = { str(__A ): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __UpperCamelCase = {} logger.info('`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values.' ) if vision_config is None: __UpperCamelCase = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' ) __UpperCamelCase = AltCLIPTextConfig(**__A ) __UpperCamelCase = AltCLIPVisionConfig(**__A ) __UpperCamelCase = projection_dim __UpperCamelCase = logit_scale_init_value __UpperCamelCase = 1.0 @classmethod def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = copy.deepcopy(self.__dict__ ) __UpperCamelCase = self.text_config.to_dict() __UpperCamelCase = self.vision_config.to_dict() __UpperCamelCase = self.__class__.model_type return output
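
# --- Added usage sketch: building the composite config from its two halves,
# mirroring the `from_text_vision_configs` classmethod defined above (names
# follow the upstream transformers AltCLIP classes, not the renamed ones here).
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_cfg = AltCLIPTextConfig(hidden_size=1024, project_dim=768)
vision_cfg = AltCLIPVisionConfig(image_size=224, patch_size=32)
cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
assert cfg.text_config.project_dim == 768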
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Any ) -> Optional[Any]: """simple docstring""" with open(__lowercase ) as metadata_file: __UpperCamelCase = json.load(__lowercase ) __UpperCamelCase = LukeConfig(use_entity_aware_attention=__lowercase , **metadata['model_config'] ) # Load in the weights from the checkpoint_path __UpperCamelCase = torch.load(__lowercase , map_location='cpu' ) # Load the entity vocab file __UpperCamelCase = load_entity_vocab(__lowercase ) __UpperCamelCase = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks __UpperCamelCase = AddedToken('<ent>' , lstrip=__lowercase , rstrip=__lowercase ) __UpperCamelCase = AddedToken('<ent2>' , lstrip=__lowercase , rstrip=__lowercase ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__lowercase ) with open(os.path.join(__lowercase , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(__lowercase , __lowercase ) __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase ) # Initialize the embeddings of the special tokens __UpperCamelCase = state_dict['embeddings.word_embeddings.weight'] __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 ) __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 ) __UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __UpperCamelCase = F'''encoder.layer.{layer_index}.attention.self.''' __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight'] __UpperCamelCase = entity_emb[entity_vocab['[MASK]']] __UpperCamelCase = LukeModel(config=__lowercase ).eval() __UpperCamelCase , __UpperCamelCase = model.load_state_dict(__lowercase , strict=__lowercase ) if not (len(__lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'''Missing keys {', '.join(__lowercase )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )): raise ValueError( 'Unexpected keys' F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' ) # Check outputs __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase , task='entity_classification' ) __UpperCamelCase = ( 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the' ' new world number one avoid a humiliating second- round exit at Wimbledon .' 
) __UpperCamelCase = (39, 42) __UpperCamelCase = tokenizer(__lowercase , entity_spans=[span] , add_prefix_space=__lowercase , return_tensors='pt' ) __UpperCamelCase = model(**__lowercase ) # Verify word hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 42, 1024) ) __UpperCamelCase = torch.tensor( [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] ) else: # base __UpperCamelCase = torch.Size((1, 42, 768) ) __UpperCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 1, 1024) ) __UpperCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] ) else: # base __UpperCamelCase = torch.Size((1, 1, 768) ) __UpperCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(__lowercase ) ) model.save_pretrained(__lowercase ) def lowercase__ ( __lowercase : Dict ) -> List[str]: """simple docstring""" __UpperCamelCase = {} with open(__lowercase , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(__lowercase ): __UpperCamelCase , __UpperCamelCase = line.rstrip().split('\t' ) __UpperCamelCase = index return entity_vocab if __name__ == "__main__": a__ : Any =argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) a__ : str =parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
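
# --- Added example: the entity_vocab.tsv layout that load_entity_vocab above
# expects, one "<entity>\t<count>" pair per line with the id taken from the
# line index (the contents below are made up for illustration).
EXAMPLE_ENTITY_VOCAB_TSV = "[PAD]\t0\n[UNK]\t0\n[MASK]\t0\nAna Ivanovic\t17\n"
# parsed result: {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2, "Ana Ivanovic": 3}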
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def snake_case_ ( A_ : Optional[Any] ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def snake_case_ ( A_ : Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Tuple = np.max(_outputs, axis=-1, keepdims=A_ ) _lowerCamelCase : str = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=A_ ) class __snake_case ( _lowercase): snake_case__ : Tuple = "sigmoid" snake_case__ : str = "softmax" snake_case__ : Tuple = "none" @add_end_docstrings( _lowercase , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class __snake_case ( _lowercase): snake_case__ : Dict = False snake_case__ : List[str] = ClassificationFunction.NONE def __init__( self : List[str] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" super().__init__(**__lowerCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : List[str]="" , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCamelCase : Any = tokenizer_kwargs _lowerCamelCase : List[Any] = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: _lowerCamelCase : str = self.model.config.return_all_scores if isinstance(__lowerCAmelCase , __lowerCAmelCase ) or top_k is None: _lowerCamelCase : Tuple = top_k _lowerCamelCase : List[str] = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , __lowerCAmelCase , ) if return_all_scores: _lowerCamelCase : Dict = None else: _lowerCamelCase : Any = 1 if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _lowerCamelCase : str = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: _lowerCamelCase : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : Tuple , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = super().__call__(*__lowerCAmelCase , **__lowerCAmelCase ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
_lowerCamelCase : Dict = '''top_k''' not in kwargs if isinstance(args[0] , __lowerCAmelCase ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = self.framework if isinstance(__lowerCAmelCase , __lowerCAmelCase ): return self.tokenizer(**__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] , __lowerCAmelCase ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" return self.model(**__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : int=1 , __lowerCAmelCase : Dict=True ): """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: _lowerCamelCase : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: _lowerCamelCase : Optional[int] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: _lowerCamelCase : List[str] = self.model.config.function_to_apply else: _lowerCamelCase : List[Any] = ClassificationFunction.NONE _lowerCamelCase : Union[str, Any] = model_outputs['''logits'''][0] _lowerCamelCase : Dict = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: _lowerCamelCase : Dict = sigmoid(__lowerCAmelCase ) elif function_to_apply == ClassificationFunction.SOFTMAX: _lowerCamelCase : Tuple = softmax(__lowerCAmelCase ) elif function_to_apply == ClassificationFunction.NONE: _lowerCamelCase : str = outputs else: raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} _lowerCamelCase : Tuple = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(__lowerCAmelCase ) ] if not _legacy: dict_scores.sort(key=lambda __lowerCAmelCase : x["score"] , reverse=__lowerCAmelCase ) if top_k is not None: _lowerCamelCase : Tuple = dict_scores[:top_k] return dict_scores
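
# --- Added example: the postprocessing choice implemented above (sigmoid for
# multi-label or single-logit heads, softmax for single-label multi-class) on
# toy logits. Standalone numpy sketch.
import numpy as np

logits = np.array([2.0, 0.5, -1.0])
shifted = np.exp(logits - logits.max())
softmax_scores = shifted / shifted.sum()  # sums to 1: mutually exclusive labels
sigmoid_scores = 1.0 / (1.0 + np.exp(-logits))  # independent per-label scores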
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case ( __lowerCamelCase ): """simple docstring""" def _lowerCamelCase ( self : Any ): __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = 8 # DPR tok __UpperCamelCase = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __UpperCamelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) ) __UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase = {'unk_token': '<unk>'} __UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__A ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__A ) ) def _lowerCamelCase ( self : Tuple ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Optional[int] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Union[str, Any] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def _lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , 
question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _lowerCamelCase ( self : Any , __A : bool ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , ) if from_disk: __UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' ) __UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' ) dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) ) dataset.drop_index('embeddings' ) dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) ) del dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , ) return retriever def _lowerCamelCase ( self : int ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' ) dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' ) pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) ) __UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' ) __UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset} pickle.dump(__A , open(__A , 'wb' ) ) __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , ) __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with 
patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = self.get_dummy_dataset() retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_legacy_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) 
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] ) self.assertEqual(len(doc_dicts[0]['text'] ) , __A ) self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Optional[Any] ): import torch __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , np.ndarray ) __UpperCamelCase = retriever( __A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841 out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], out['doc_ids'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer() __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) retriever.set_ctx_encoder_tokenizer(__A ) __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) self.assertEqual( len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
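
# --- Added example: the max-inner-product retrieval the dummy datasets above
# rely on, shown directly with faiss (assumes faiss-cpu is installed; sizes
# are illustrative).
import faiss
import numpy as np

vector_size = 8
index = faiss.IndexFlatIP(vector_size)  # "Flat" index with inner-product metric
index.add(np.stack([np.ones(vector_size), 2 * np.ones(vector_size)]).astype("float32"))
query = np.ones((1, vector_size), dtype="float32")
scores, ids = index.search(query, 1)
assert ids[0, 0] == 1  # the all-twos document has the larger inner product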
from ...configuration_utils import PretrainedConfig from ...utils import logging a =logging.get_logger(__name__) a ={ """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Any = '''transfo-xl''' _UpperCAmelCase : Union[str, Any] = ['''mems'''] _UpperCAmelCase : List[Any] = { '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_6_7_7_3_5 ,SCREAMING_SNAKE_CASE__ : List[Any]=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] ,SCREAMING_SNAKE_CASE__ : List[Any]=1_0_2_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_0_2_4 ,SCREAMING_SNAKE_CASE__ : int=1_6 ,SCREAMING_SNAKE_CASE__ : List[str]=6_4 ,SCREAMING_SNAKE_CASE__ : Any=4_0_9_6 ,SCREAMING_SNAKE_CASE__ : int=4 ,SCREAMING_SNAKE_CASE__ : List[Any]=False ,SCREAMING_SNAKE_CASE__ : List[str]=1_8 ,SCREAMING_SNAKE_CASE__ : Any=1_6_0_0 ,SCREAMING_SNAKE_CASE__ : List[Any]=1_0_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : Tuple=0 ,SCREAMING_SNAKE_CASE__ : str=-1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]="normal" ,SCREAMING_SNAKE_CASE__ : List[Any]=0.01 ,SCREAMING_SNAKE_CASE__ : Dict=0.01 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=1E-5 ,SCREAMING_SNAKE_CASE__ : List[str]=0 ,**SCREAMING_SNAKE_CASE__ : List[Any] ,): __lowerCamelCase : Tuple = vocab_size __lowerCamelCase : Optional[int] = [] self.cutoffs.extend(SCREAMING_SNAKE_CASE__) if proj_share_all_but_first: __lowerCamelCase : List[Any] = [False] + [True] * len(self.cutoffs) else: __lowerCamelCase : str = [False] + [False] * len(self.cutoffs) __lowerCamelCase : str = d_model __lowerCamelCase : Dict = d_embed __lowerCamelCase : Any = d_head __lowerCamelCase : List[Any] = d_inner __lowerCamelCase : Dict = div_val __lowerCamelCase : List[Any] = pre_lnorm __lowerCamelCase : Tuple = n_layer __lowerCamelCase : int = n_head __lowerCamelCase : Optional[int] = mem_len __lowerCamelCase : List[str] = same_length __lowerCamelCase : Any = attn_type __lowerCamelCase : int = clamp_len __lowerCamelCase : str = sample_softmax __lowerCamelCase : List[Any] = adaptive __lowerCamelCase : Tuple = dropout __lowerCamelCase : List[Any] = dropatt __lowerCamelCase : Union[str, Any] = untie_r __lowerCamelCase : Any = init __lowerCamelCase : List[str] = init_range __lowerCamelCase : Optional[int] = proj_init_std __lowerCamelCase : List[Any] = init_std __lowerCamelCase : Any = layer_norm_epsilon super().__init__(eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) @property def lowerCAmelCase ( self : List[Any]): # Message copied from Transformer-XL documentation logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit.") return -1 @max_position_embeddings.setter def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict): # Message copied from Transformer-XL documentation raise NotImplementedError( F"The model {self.model_type} is one of the few models that has no sequence length limit.")
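
# --- Added example: how the adaptive-softmax `cutoffs` above partition the
# 267,735-token vocabulary into frequency clusters (a small illustration of
# the defaults, not part of the config file).
cutoffs = [20000, 40000, 200000]
vocab_size = 267735
bounds = [0] + cutoffs + [vocab_size]
clusters = list(zip(bounds, bounds[1:]))
# -> [(0, 20000), (20000, 40000), (40000, 200000), (200000, 267735)]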
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[Any] ={ '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] =[ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys a__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : int ,A_ : Tuple ,A_ : List[str] ) -> Tuple: super().__init__() self.register_modules(unet=A_ ,scheduler=A_ ) @torch.no_grad() def __call__( self : Optional[Any] ,A_ : int = 1 ,A_ : Optional[torch.Generator] = None ,A_ : int = 50 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Tuple ,) -> Union[ImagePipelineOutput, Tuple]: A = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,generator=A_ ,) A = image.to(self.device ) # set step values self.scheduler.set_timesteps(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A = self.unet(A_ ,A_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample A = (image / 2 + 0.5).clamp(0 ,1 ) A = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=A_ ), "This is a local test"
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any]=False ) -> Tuple: """simple docstring""" try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value a__ : str =parse_flag_from_env('''RUN_SLOW''', default=False) a__ : Union[str, Any] =parse_flag_from_env('''RUN_REMOTE''', default=False) a__ : List[str] =parse_flag_from_env('''RUN_LOCAL''', default=True) a__ : Optional[int] =parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression a__ : Any =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') a__ : Optional[int] =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') a__ : List[str] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio a__ : Any =pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam a__ : Tuple =pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility a__ : Union[str, Any] =pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows a__ : int =pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase__ ( __lowercase : Optional[Any] ) -> Optional[int]: """simple docstring""" try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires faiss' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Any: """simple docstring""" try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires regex' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Tuple ) -> List[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires elasticsearch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires sqlalchemy' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[str] ) -> List[str]: """simple docstring""" if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip('test requires PyTorch' )(__lowercase ) return test_case def 
lowercase__ ( __lowercase : Optional[Any] ) -> List[str]: """simple docstring""" if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip('test requires TensorFlow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : int ) -> Union[str, Any]: """simple docstring""" if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip('test requires JAX' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> Optional[Any]: """simple docstring""" if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip('test requires Pillow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : int ) -> int: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> int: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> Any: """simple docstring""" def _require_spacy_model(__lowercase : Any ): try: import spacy # noqa F401 spacy.load(__lowercase ) except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(__lowercase ) )(__lowercase ) else: return test_case return _require_spacy_model def lowercase__ ( __lowercase : Union[str, Any] ) -> str: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Optional[Any]: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip('test is slow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip('test is local' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip('test is packaged' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Any: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip('test requires remote' )(__lowercase ) return test_case def lowercase__ ( *__lowercase : Optional[Any] ) -> Tuple: """simple docstring""" def decorate(cls : int ): for name, fn in cls.__dict__.items(): if callable(__lowercase ) and name.startswith('test' ): for decorator in decorators: __UpperCamelCase = decorator(__lowercase ) setattr(cls , __lowercase , __lowercase ) return cls return decorate class snake_case ( __lowerCamelCase ): """simple docstring""" pass class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =0 SCREAMING_SNAKE_CASE_ : List[Any] =1 SCREAMING_SNAKE_CASE_ : Union[str, Any] =2 @contextmanager def lowercase__ ( 
__lowercase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , __lowercase : Dict=1e-16 ) -> List[Any]: """simple docstring""" __UpperCamelCase = requests.Session().request def timeout_request(__lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , **__lowercase : List[str] ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) __UpperCamelCase = timeout try: return online_request(__lowercase , __lowercase , **__lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__lowercase : int , __lowercase : List[str] , **__lowercase : Union[str, Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=__lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , __lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , __lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , __lowercase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def lowercase__ ( *__lowercase : Any , **__lowercase : Dict ) -> Dict: """simple docstring""" __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__lowercase , **__lowercase ) as tmp_dir: try: os.chdir(__lowercase ) yield finally: os.chdir(__lowercase ) @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase__ ( __lowercase : List[str] , __lowercase : int ) -> Union[str, Any]: """simple docstring""" return deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__lowercase : List[Any] , *__lowercase : Tuple , **__lowercase : Union[str, Any] ): try: return func(*__lowercase , **__lowercase ) except HTTPError as err: if str(__lowercase ).startswith('500' ) or str(__lowercase ).startswith('502' ): pytest.xfail(str(__lowercase ) ) raise err return decorator.decorator(_wrapper , __lowercase ) class snake_case : """simple docstring""" def __init__( self : int , __A : Any , __A : str , __A : List[Any] ): __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def lowercase__ ( __lowercase : Any , __lowercase : Optional[int] ) -> str: """simple docstring""" while True: __UpperCamelCase = await stream.readline() if line: callback(__lowercase ) else: break async def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : Any=None , __lowercase : Optional[Any]=None , __lowercase : int=False , __lowercase : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(__lowercase ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Tuple="" ): __UpperCamelCase = line.decode('utf-8' ).rstrip() sink.append(__lowercase ) if not quiet: print(__lowercase , __lowercase , file=__lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __lowercase : tee(__lowercase , __lowercase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda __lowercase : tee(__lowercase , __lowercase , sys.stderr , label='stderr:' ) ), ] , timeout=__lowercase , ) return _RunOutput(await p.wait() , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : int=180 , __lowercase : int=False , __lowercase : str=True ) -> _RunOutput: """simple docstring""" __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) ) __UpperCamelCase = ' '.join(__lowercase ) if result.returncode > 0: __UpperCamelCase = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCamelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __UpperCamelCase = re.sub(R'^gw' , '' , __lowercase , 0 , re.M ) return int(__lowercase ) def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = 29500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
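The env-flag parsing and skip-decorator pattern above recurs throughout this file; here is a self-contained sketch of that pattern (helper names are reconstructed, since the identifiers in this dump are all mangled to `lowercase__`):

import os
import unittest
from distutils.util import strtobool


def parse_flag_from_env(key: str, default: bool = False) -> bool:
    # Missing key -> default; otherwise accept yes/no, true/false, 1/0, etc.
    try:
        value = os.environ[key]
    except KeyError:
        return default
    return bool(strtobool(value))


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def slow(test_case):
    # Skip the decorated test unless RUN_SLOW is set to a truthy value.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)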
'''simple docstring''' import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging a_ : Optional[Any] = logging.get_logger(__name__) def a_ ( ) -> Union[str, Any]: """simple docstring""" # Get the sagemaker specific mp parameters from smp_options variable. lowerCamelCase_ =os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. lowerCamelCase_ =json.loads(__snake_case ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. lowerCamelCase_ =os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". lowerCamelCase_ =json.loads(__snake_case ) if not mpi_options.get('''sagemaker_mpi_enabled''' , __snake_case ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class __UpperCamelCase ( lowerCamelCase__ ): lowercase : str =field( default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , ) def lowercase__ ( self ): """simple docstring""" super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''', lowerCAmelCase, ) @cached_property def lowercase__ ( self ): """simple docstring""" logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: lowerCamelCase_ =torch.device('''cpu''' ) lowerCamelCase_ =0 elif is_sagemaker_model_parallel_available(): lowerCamelCase_ =smp.local_rank() lowerCamelCase_ =torch.device('''cuda''', lowerCAmelCase ) lowerCamelCase_ =1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''', timeout=self.ddp_timeout_delta ) lowerCamelCase_ =int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) lowerCamelCase_ =torch.device('''cuda''', self.local_rank ) lowerCamelCase_ =1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 lowerCamelCase_ =torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. lowerCamelCase_ =torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''', timeout=self.ddp_timeout_delta ) lowerCamelCase_ =torch.device('''cuda''', self.local_rank ) lowerCamelCase_ =1 if device.type == "cuda": torch.cuda.set_device(lowerCAmelCase ) return device @property def lowercase__ ( self ): """simple docstring""" if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def lowercase__ ( self ): """simple docstring""" return not is_sagemaker_model_parallel_available() @property def lowercase__ ( self ): """simple docstring""" return False
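A hedged sketch of how the device-setup property above is consumed; `SageMakerTrainingArguments` is the public name this mangled class corresponds to, and outside an actual SageMaker job the property falls back to plain CUDA/CPU selection:

from transformers.sagemaker import SageMakerTrainingArguments

args = SageMakerTrainingArguments(output_dir="out")  # emits the deprecation warning above
print(args.device)      # e.g. device(type='cuda', index=0), or cpu when no GPU is visible
print(args.world_size)  # smp.dp_size() under model parallelism, else the inherited value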
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a__ : Tuple ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowerCamelCase__ ( _a , _a): SCREAMING_SNAKE_CASE : Optional[int] = args.log_outputs SCREAMING_SNAKE_CASE : int = "_".join(args.dataset.split("/") + [args.config, args.split]) # load metric SCREAMING_SNAKE_CASE : List[str] = load_metric("wer") SCREAMING_SNAKE_CASE : List[Any] = load_metric("cer") # compute metrics SCREAMING_SNAKE_CASE : Dict = wer.compute(references=result["target"] , predictions=result["prediction"]) SCREAMING_SNAKE_CASE : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"]) # print & log results SCREAMING_SNAKE_CASE : List[str] = f"WER: {wer_result}\nCER: {cer_result}" print(_a) with open(f"{dataset_id}_eval_results.txt" , "w") as f: f.write(_a) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: SCREAMING_SNAKE_CASE : Optional[Any] = f"log_{dataset_id}_predictions.txt" SCREAMING_SNAKE_CASE : Optional[int] = f"log_{dataset_id}_targets.txt" with open(_a , "w") as p, open(_a , "w") as t: # mapping function to write output def write_to_file(_a , _a): p.write(f"{i}" + "\n") p.write(batch["prediction"] + "\n") t.write(f"{i}" + "\n") t.write(batch["target"] + "\n") result.map(_a , with_indices=_a) def lowerCamelCase__ ( _a): SCREAMING_SNAKE_CASE : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(_a , "" , text.lower()) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! SCREAMING_SNAKE_CASE : int = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: SCREAMING_SNAKE_CASE : Optional[Any] = " ".join(text.split(_a)) return text def lowerCamelCase__ ( _a): # load dataset SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_a) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor SCREAMING_SNAKE_CASE : Dict = AutoFeatureExtractor.from_pretrained(args.model_id) SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor.sampling_rate # resample audio SCREAMING_SNAKE_CASE : Union[str, Any] = dataset.cast_column("audio" , Audio(sampling_rate=_a)) # load eval pipeline if args.device is None: SCREAMING_SNAKE_CASE : int = 0 if torch.cuda.is_available() else -1 SCREAMING_SNAKE_CASE : Any = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device) # map function to decode audio def map_to_pred(_a): SCREAMING_SNAKE_CASE : List[Any] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s) SCREAMING_SNAKE_CASE : List[Any] = prediction["text"] SCREAMING_SNAKE_CASE : Tuple = normalize_text(batch["sentence"]) return batch # run inference on all examples SCREAMING_SNAKE_CASE : Union[str, Any] = dataset.map(_a , remove_columns=dataset.column_names) # compute and log_results # do not change function below log_results(_a , _a) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) a_ = parser.parse_args() main(args)
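To make the normalization step above easy to sanity-check, here is a standalone restatement (the surrounding identifiers are mangled in this dump, so the function name is reconstructed; the multi-space entries in the token list are assumed to be three- and two-space runs, which the flattening here collapsed):

import re


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]'  # noqa: W605
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # Order matters: collapse double newlines before single ones, wide runs of
    # spaces before narrow ones.
    for token in ["\n\n", "\n", "   ", "  "]:
        text = " ".join(text.split(token))
    return text


assert normalize_text("Hello, World!\nNew line…") == "hello world new line"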
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]: """simple docstring""" if split_mlp_wi: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __UpperCamelCase = (wi_a, wi_a) else: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str: """simple docstring""" return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) __UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , __lowercase ) __UpperCamelCase = collections.OrderedDict() # Shared embeddings. __UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , __lowercase , 'encoder' ).T __UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'encoder' ).T __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 2 (MLP). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T __UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int: """simple docstring""" __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) __UpperCamelCase = state_dict['shared.weight'] return state_dict def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase ) __UpperCamelCase = convert_tax_to_pytorch( __lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase ) __UpperCamelCase = make_state_dict(__lowercase , __lowercase ) model.load_state_dict(__lowercase , strict=__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]: """simple docstring""" __UpperCamelCase = MTaConfig.from_json_file(__lowercase ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCamelCase = UMTaEncoderModel(__lowercase ) else: __UpperCamelCase = UMTaForConditionalGeneration(__lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(__lowercase ) print('Done' ) if __name__ == "__main__": a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) a__ : List[str] =parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
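Example invocation of the converter above, with hypothetical paths and script name (the flag names come from the argparse definitions in this file; note that the dump's `tax`/`ta` identifiers stand in for `t5x`/`t5`):

# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model \
#     --scalable_attention   # only for umt5-style scaled attention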
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=True ): '''simple docstring''' model.train() lowercase__ : Optional[int] = model(_lowerCAmelCase ) lowercase__ : int = F.mse_loss(_lowerCAmelCase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=False ): '''simple docstring''' set_seed(42 ) lowercase__ : Union[str, Any] = RegressionModel() lowercase__ : Dict = deepcopy(_lowerCAmelCase ) lowercase__ : Dict = RegressionDataset(length=80 ) lowercase__ : int = DataLoader(_lowerCAmelCase , batch_size=16 ) model.to(accelerator.device ) if sched: lowercase__ : int = AdamW(params=model.parameters() , lr=1E-3 ) lowercase__ : Dict = AdamW(params=ddp_model.parameters() , lr=1E-3 ) lowercase__ : str = LambdaLR(_lowerCAmelCase , lr_lambda=lambda _lowerCAmelCase : epoch**0.6_5 ) lowercase__ : Optional[Any] = LambdaLR(_lowerCAmelCase , lr_lambda=lambda _lowerCAmelCase : epoch**0.6_5 ) # Make a copy of `model` if sched: lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: lowercase__ , lowercase__ : str = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = get_training_setup(_lowerCAmelCase ) # Use a single batch lowercase__ , lowercase__ : Optional[int] = next(iter(_lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target) ) lowercase__ , lowercase__ : Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with 
accelerator.no_sync(_lowerCAmelCase ): step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: # Sync grads step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCAmelCase ) )] def a_ ( _lowerCAmelCase : Any ): '''simple docstring''' lowercase__ , lowercase__ , lowercase__ : List[str] = get_training_setup(_lowerCAmelCase ) # Use a single batch lowercase__ , lowercase__ : Tuple = next(iter(_lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : str = accelerator.gather((ddp_input, ddp_target) ) lowercase__ , lowercase__ : Dict = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(_lowerCAmelCase ): step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: # Sync grads step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) lowercase__ : List[Any] = ddp_input[torch.randperm(len(_lowerCAmelCase ) )] def a_ ( _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[int]=False ): '''simple docstring''' lowercase__ : str = Accelerator( split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ : Optional[Any] = get_training_setup(_lowerCAmelCase ) for iteration, batch in enumerate(_lowerCAmelCase ): lowercase__ , lowercase__ : Optional[int] = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : Dict = accelerator.gather((ddp_input, ddp_target) ) lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(_lowerCAmelCase ): 
step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCAmelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) lowercase__ : int = ddp_input[torch.randperm(len(_lowerCAmelCase ) )] GradientState._reset_state() def a_ ( _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ): '''simple docstring''' lowercase__ : List[Any] = Accelerator( split_batches=_lowerCAmelCase , dispatch_batches=_lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = get_training_setup(_lowerCAmelCase , _lowerCAmelCase ) for iteration, batch in enumerate(_lowerCAmelCase ): lowercase__ , lowercase__ : str = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target) ) lowercase__ , lowercase__ : List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCAmelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(_lowerCAmelCase ): step_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" lowercase__ : List[str] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCAmelCase )) if accelerator.num_processes > 1: check_model_parameters(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def a_ ( ): '''simple docstring''' lowercase__ : List[str] = Accelerator() lowercase__ : int = RegressionDataset(length=80 ) lowercase__ : Tuple = DataLoader(_lowerCAmelCase , batch_size=16 ) lowercase__ : Optional[int] = RegressionDataset(length=96 ) lowercase__ : List[str] = DataLoader(_lowerCAmelCase , batch_size=16 ) lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(_lowerCAmelCase , _lowerCAmelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(_lowerCAmelCase ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(_lowerCAmelCase ) if iteration < len(_lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(_lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(_lowerCAmelCase ) if batch_num < len(_lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def a_ ( ): '''simple docstring''' lowercase__ : List[str] = Accelerator() lowercase__ : str = accelerator.state if state.local_process_index == 0: print('**Test `accumulate` gradient accumulation with dataloader break**' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('**Test NOOP `no_sync` context manager**' ) test_noop_sync(_lowerCAmelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('**Test Distributed `no_sync` context manager**' ) test_distributed_sync(_lowerCAmelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation, ' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(_lowerCAmelCase , _lowerCAmelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(_lowerCAmelCase , _lowerCAmelCase ) def a_ ( _lowerCAmelCase : str ): '''simple docstring''' main() if __name__ == "__main__": main()
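A minimal, self-contained sketch of the `accelerator.accumulate` pattern these tests exercise, using a toy regression model (all sizes illustrative):

import torch
import torch.nn.functional as F
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 1), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    with accelerator.accumulate(model):
        loss = F.mse_loss(model(inputs), targets)
        accelerator.backward(loss)  # gradients sync only on accumulation boundaries
        optimizer.step()
        optimizer.zero_grad()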
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : List[Any] ="BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Optional[int] =("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , __A : Optional[int] , __A : List[Any] ): __UpperCamelCase = False super().__init__(__A , __A ) __UpperCamelCase = self.image_processor def __call__( self : List[Any] , __A : ImageInput = None , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ): if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: __UpperCamelCase = self.tokenizer __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding # add pixel_values __UpperCamelCase = self.image_processor(__A , return_tensors=__A ) if text is not None: __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) else: __UpperCamelCase = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def _lowerCamelCase ( self : List[Any] , *__A : Dict , **__A : Optional[int] ): return self.tokenizer.batch_decode(*__A , **__A ) def _lowerCamelCase ( self : List[Any] , *__A : List[str] , **__A : Dict ): return self.tokenizer.decode(*__A , **__A ) @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.tokenizer.model_input_names __UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
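A hedged usage sketch for a BLIP-style processor like the class above, via the public `BlipProcessor`; the checkpoint name and image URL are illustrative:

import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']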
"""simple docstring""" import argparse import struct import unittest class A_ : """simple docstring""" def __init__( self :int , lowercase_ :bytes ) -> None: UpperCAmelCase = data # Initialize hash values UpperCAmelCase = [ 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19, ] # Initialize round constants UpperCAmelCase = [ 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2, ] UpperCAmelCase = self.preprocessing(self.data ) self.final_hash() @staticmethod def UpperCAmelCase__ ( lowercase_ :bytes ) -> bytes: UpperCAmelCase = b'\x80' + (b'\x00' * (63 - (len(lowercase_ ) + 8) % 64)) UpperCAmelCase = struct.pack('>Q' , (len(lowercase_ ) * 8) ) return data + padding + big_endian_integer def UpperCAmelCase__ ( self :List[str] ) -> None: # Convert into blocks of 64 bytes UpperCAmelCase = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers UpperCAmelCase = list(struct.unpack('>16L' , lowercase_ ) ) # add 48 0-ed integers words += [0] * 48 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array UpperCAmelCase = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) UpperCAmelCase = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) UpperCAmelCase = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x100000000 # Compression UpperCAmelCase = self.ror(lowercase_ , 6 ) ^ self.ror(lowercase_ , 11 ) ^ self.ror(lowercase_ , 25 ) UpperCAmelCase = (e & f) ^ ((~e & 0xFFFFFFFF) & g) UpperCAmelCase = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100000000 UpperCAmelCase = self.ror(lowercase_ , 2 ) ^ self.ror(lowercase_ , 13 ) ^ self.ror(lowercase_ , 22 ) UpperCAmelCase = (a & b) ^ (a & c) ^ (b & c) UpperCAmelCase = (sa + maj) % 0x100000000 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = ( g, f, e, ((d + tempa) % 0x100000000), c, b, a, ((tempa + tempa) % 0x100000000), ) UpperCAmelCase = [a, b, c, d, e, f, g, h] # Modify final values UpperCAmelCase = [ ((element + mutated_hash_values[index]) % 0x100000000) for index, element in enumerate(self.hashes ) ] UpperCAmelCase = ''.join([hex(lowercase_ )[2:].zfill(8 ) for value in self.hashes] ) def UpperCAmelCase__ ( self :Dict , lowercase_ :int , lowercase_ :int ) -> int: return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations) class A_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ 
( self :Dict ) -> None: import hashlib UpperCAmelCase = bytes('Test String' , 'utf-8' ) self.assertEqual(SHAaaa(lowercase_ ).hash , hashlib.shaaaa(lowercase_ ).hexdigest() ) def _lowerCAmelCase ( ): import doctest doctest.testmod() UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: UpperCAmelCase = f.read() else: UpperCAmelCase = bytes(lowercase_ , 'utf-8' ) print(SHAaaa(lowercase_ ).hash ) if __name__ == "__main__": main()
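A quick cross-check of the from-scratch implementation above against hashlib, mirroring its unit test; the class is named `A_` in this dump but its own test and `main` refer to it as `SHAaaa` (SHA256 with the digits mangled), and the alias below assumes the mangled attribute assignments restore to `self.hash` as the test implies:

import hashlib

SHAaaa = A_  # alias matching the name used in the test above
data = bytes("Test String", "utf-8")
assert SHAaaa(data).hash == hashlib.sha256(data).hexdigest()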
'''simple docstring'''
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
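As an alternative to the visited-list check above, Floyd's tortoise-and-hare detects a cycle in O(n) time and O(1) extra space; this sketch is not part of the original file:

def has_loop_floyd(head: Node | None) -> bool:
    # Advance one pointer by one node and another by two; they can only meet
    # inside a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False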
'''simple docstring'''
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    '''
    Return the smallest possible sum along a path from the top-left to the
    bottom-right of the grid, moving only right or down. The matrix is
    updated in place with the running path costs.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    '''
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
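Note that the function above accumulates costs into `matrix` in place; pass a copy when the original grid must survive. Worked example (cheapest path 1 -> 3 -> 1 -> 1 -> 1 = 7):

import copy

grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
print(min_path_sum(copy.deepcopy(grid)))  # 7, and grid itself is left untouched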
'''simple docstring'''
alphabet_size = 256  # number of symbols in the alphabet, used as the hash base
modulus = 1_000_003  # modulus to hash a string


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = "abc1abc12"
    text_with_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_without_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_with_match) and not rabin_karp(pattern, text_without_match)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
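A quick demo of the matcher above; expected time is O(n + m) thanks to the rolling hash, degenerating toward O(n * m) only when hash collisions force repeated direct comparisons:

print(rabin_karp("abc", "xxabcxx"))  # True
print(rabin_karp("abd", "xxabcxx"))  # False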
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py a__ : List[str] = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. a__ : int = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. a__ : str = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') a__ : List[str] = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. a__ : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) a__ : Tuple = [ ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'), ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'), ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'), ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'), ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'), ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'), ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'), ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'), ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'), ( 'zero-shot-object-detection', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForZeroShotObjectDetection', ), ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'), ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'), ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'), ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'), ( 'table-question-answering', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForTableQuestionAnswering', ), ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'), ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'), ( 'next-sentence-prediction', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES', 'AutoModelForNextSentencePrediction', ), ( 'audio-frame-classification', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioFrameClassification', ), ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'), ( 'document-question-answering', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForDocumentQuestionAnswering', ), ( 'visual-question-answering', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForVisualQuestionAnswering', ), ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'), ( 'zero-shot-image-classification', 'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES', 
'AutoModelForZeroShotImageClassification', ), ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'), ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'), ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'), ] def _UpperCamelCase ( __A ) -> Any: '''simple docstring''' UpperCamelCase__ = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , __A ) return [m.group(0 ) for m in matches] def _UpperCamelCase ( ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES UpperCamelCase__ = { config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. UpperCamelCase__ = collections.defaultdict(__A ) UpperCamelCase__ = collections.defaultdict(__A ) UpperCamelCase__ = collections.defaultdict(__A ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(__A ): UpperCamelCase__ = None if _re_tf_models.match(__A ) is not None: UpperCamelCase__ = tf_models UpperCamelCase__ = _re_tf_models.match(__A ).groups()[0] elif _re_flax_models.match(__A ) is not None: UpperCamelCase__ = flax_models UpperCamelCase__ = _re_flax_models.match(__A ).groups()[0] elif _re_pt_models.match(__A ) is not None: UpperCamelCase__ = pt_models UpperCamelCase__ = _re_pt_models.match(__A ).groups()[0] if lookup_dict is not None: while len(__A ) > 0: if attr_name in model_prefix_to_model_type: UpperCamelCase__ = True break # Try again after removing the last word in the name UpperCamelCase__ = "".join(camel_case_split(__A )[:-1] ) UpperCamelCase__ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) UpperCamelCase__ = list(__A ) all_models.sort() UpperCamelCase__ = {"model_type": all_models} UpperCamelCase__ = [pt_models[t] for t in all_models] UpperCamelCase__ = [tf_models[t] for t in all_models] UpperCamelCase__ = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure UpperCamelCase__ = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: UpperCamelCase__ = "AutoProcessor" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: UpperCamelCase__ = "AutoTokenizer" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: UpperCamelCase__ = "AutoFeatureExtractor" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
UpperCamelCase__ = "AutoTokenizer" UpperCamelCase__ = [processors[t] for t in all_models] return pd.DataFrame(__A ) def _UpperCamelCase ( __A ) -> int: '''simple docstring''' UpperCamelCase__ = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: UpperCamelCase__ = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}'''] UpperCamelCase__ = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}'''] # Loop through all three frameworks for module, cls, mapping in zip(__A , __A , __A ): # The type of pipeline may not exist in this framework if not hasattr(__A , __A ): continue # First extract all model_names UpperCamelCase__ = [] for name in getattr(__A , __A ).values(): if isinstance(__A , __A ): model_names.append(__A ) else: model_names.extend(list(__A ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def _UpperCamelCase ( __A , __A ) -> Tuple: '''simple docstring''' UpperCamelCase__ = get_frameworks_table() UpperCamelCase__ = Dataset.from_pandas(__A ) UpperCamelCase__ = hf_hub_download( "huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=__A ) UpperCamelCase__ = Dataset.from_json(__A ) UpperCamelCase__ = { tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"]) for i in range(len(__A ) ) } UpperCamelCase__ = update_pipeline_and_auto_class_table(__A ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. UpperCamelCase__ = sorted(table.keys() ) UpperCamelCase__ = pd.DataFrame( { "model_class": model_classes, "pipeline_tag": [table[m][0] for m in model_classes], "auto_class": [table[m][1] for m in model_classes], } ) UpperCamelCase__ = Dataset.from_pandas(__A ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(__A , "frameworks.json" ) ) tags_dataset.to_json(os.path.join(__A , "pipeline_tags.json" ) ) if commit_sha is not None: UpperCamelCase__ = ( F'''Update with commit {commit_sha}\n\nSee: ''' F'''https://github.com/huggingface/transformers/commit/{commit_sha}''' ) else: UpperCamelCase__ = "Update" upload_folder( repo_id="huggingface/transformers-metadata" , folder_path=__A , repo_type="dataset" , token=__A , commit_message=__A , ) def _UpperCamelCase ( ) -> int: '''simple docstring''' UpperCamelCase__ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} UpperCamelCase__ = transformers_module.pipelines.SUPPORTED_TASKS UpperCamelCase__ = [] for key in pipeline_tasks: if key not in in_table: UpperCamelCase__ = pipeline_tasks[key]["pt"] if isinstance(__A , (list, tuple) ): UpperCamelCase__ = model[0] UpperCamelCase__ = model.__name__ if model not in in_table.values(): missing.append(__A ) if len(__A ) > 0: UpperCamelCase__ = ", ".join(__A ) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " F'''`utils/update_metadata.py`: {msg}. 
Please add them!''' ) if __name__ == "__main__": a__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.') parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.') parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.') a__ : List[Any] = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
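The `camel_case_split` helper defined above does all of its work with one lookaround regex. A minimal standalone check of that exact pattern (no Transformers import needed; the sample model name is only an illustration):

import re

# Same pattern as camel_case_split above: break on lower->UPPER and
# ACRONYM->Word boundaries, with "|$" keeping the trailing segment.
PATTERN = r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"

def camel_case_split(identifier):
    return [m.group(0) for m in re.finditer(PATTERN, identifier)]

print(camel_case_split("TFBertForQuestionAnswering"))
# ['TF', 'Bert', 'For', 'Question', 'Answering']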
'''simple docstring''' from __future__ import annotations class snake_case : """simple docstring""" def __init__( self : Optional[int] , __A : list[list[int]] ): __UpperCamelCase = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(__A ) != 0: __UpperCamelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(__A ) != cols: raise error for value in row: if not isinstance(__A , (int, float) ): raise error __UpperCamelCase = rows else: __UpperCamelCase = [] def _lowerCamelCase ( self : int ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowerCamelCase ( self : str ): return len(self.rows ) @property def _lowerCamelCase ( self : Any ): return len(self.rows[0] ) @property def _lowerCamelCase ( self : Optional[Any] ): return (self.num_rows, self.num_columns) @property def _lowerCamelCase ( self : Dict ): return self.order[0] == self.order[1] def _lowerCamelCase ( self : Any ): __UpperCamelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Any ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowerCamelCase ( self : List[str] ): return bool(self.determinant() ) def _lowerCamelCase ( self : Dict , __A : int , __A : int ): __UpperCamelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(__A ).determinant() def _lowerCamelCase ( self : Dict , __A : int , __A : int ): if (row + column) % 2 == 0: return self.get_minor(__A , __A ) return -1 * self.get_minor(__A , __A ) def _lowerCamelCase ( self : List[str] ): return Matrix( [ [self.get_minor(__A , __A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowerCamelCase ( self : Union[str, Any] ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self : Optional[Any] ): return str(self.rows ) def __str__( self : Union[str, Any] ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(__A ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def _lowerCamelCase ( self : List[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in row: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(__A ) else: __UpperCamelCase = self.rows[0:position] + [row] + self.rows[position:] def _lowerCamelCase ( self : Optional[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in column: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: __UpperCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __UpperCamelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : Tuple , __A : object ): if not isinstance(__A , __A ): return NotImplemented return self.rows == other.rows def __ne__( self : Any , __A : object ): return not self == other def __neg__( self : List[Any] ): return self * -1 def __add__( self : List[str] , __A : Matrix ): if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : str , __A : Matrix ): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self : str , __A : Matrix | int | float ): if isinstance(__A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(__A , __A ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(__A , __A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self : Union[str, Any] , __A : int ): if not isinstance(__A , __A ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) __UpperCamelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowerCamelCase ( cls : Tuple , __A : list[int] , __A : list[int] ): return sum(row[i] * column[i] for i in range(len(__A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
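As dumped, the class above cannot run: the obfuscation collapsed every method name to `_lowerCamelCase`, so later definitions shadow earlier ones. The determinant logic it encodes is still recoverable, though; here is a standalone sketch of the same recursion (the same base cases for the empty, 1x1 and 2x2 matrices, then Laplace expansion along the first row), using only the standard library:

def determinant(rows):
    n = len(rows)
    if n == 0:          # the (0, 0) matrix has determinant 1, as above
        return 1
    if n == 1:
        return rows[0][0]
    if n == 2:
        return rows[0][0] * rows[1][1] - rows[0][1] * rows[1][0]
    # Laplace expansion along the first row with alternating cofactor signs.
    total = 0
    for col in range(n):
        minor = [r[:col] + r[col + 1 :] for r in rows[1:]]
        total += (-1) ** col * rows[0][col] * determinant(minor)
    return total

assert determinant([[2, 0], [0, 3]]) == 6
assert determinant([[1, 2, 3], [4, 5, 6], [7, 8, 10]]) == -3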
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , __A=1 , __A=0 , __A=2 , __A=512 , __A="cls" , __A=False , __A=True , **__A , ) -> Union[str, Any]: super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) a =project_dim a =pooler_fn a =learn_encoder a =use_attention_mask class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = [r"pooler", r"logit_scale"] __lowerCAmelCase = [r"position_ids", r"predictions.decoder.bias"] __lowerCAmelCase = "roberta" __lowerCAmelCase = RobertaSeriesConfig def __init__( self , __A ) -> Dict: super().__init__(__A ) a =XLMRobertaModel(__A ) a =nn.Linear(config.hidden_size , config.project_dim ) a =getattr(__A , '''has_pre_transformation''' , __A ) if self.has_pre_transformation: a =nn.Linear(config.hidden_size , config.project_dim ) a =nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def SCREAMING_SNAKE_CASE ( self , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , ) -> Any: a =return_dict if return_dict is not None else self.config.use_return_dict a =self.base_model( input_ids=__A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_attentions=__A , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__A , ) if self.has_pre_transformation: a =outputs['''hidden_states'''][-2] a =self.pre_LN(__A ) a =self.transformation_pre(__A ) return TransformationModelOutput( projection_state=__A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: a =self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=__A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
'''simple docstring''' import os import numpy import onnx def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> Dict: """simple docstring""" __UpperCamelCase = a.name __UpperCamelCase = b.name __UpperCamelCase = '' __UpperCamelCase = '' __UpperCamelCase = a == b __UpperCamelCase = name_a __UpperCamelCase = name_b return res def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : List[Any] ) -> Optional[int]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(__lowercase , __lowercase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) _graph_replace_input_with(node_proto.attribute[1].g , __lowercase , __lowercase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) def lowercase__ ( __lowercase : int , __lowercase : List[Any] , __lowercase : Dict ) -> int: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(__lowercase , __lowercase , __lowercase ) def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : str ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __UpperCamelCase = inits[i].name __UpperCamelCase = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = os.path.dirname(__lowercase ) __UpperCamelCase = os.path.basename(__lowercase ) __UpperCamelCase = onnx.load(os.path.join(__lowercase , __lowercase ) ) __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = set() __UpperCamelCase = {} __UpperCamelCase = [] __UpperCamelCase = 0 for i in range(len(__lowercase ) ): if i in dup_set: continue for j in range(i + 1 , len(__lowercase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(__lowercase ) dup_set.add(__lowercase ) __UpperCamelCase = inits[j].data_type __UpperCamelCase = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , __lowercase ) total_reduced_size += mem_size __UpperCamelCase = inits[i].name __UpperCamelCase = inits[j].name if name_i in dup_map: dup_map[name_i].append(__lowercase ) else: __UpperCamelCase = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) __UpperCamelCase = sorted(__lowercase ) _remove_dup_initializers_from_model(__lowercase , __lowercase , __lowercase ) __UpperCamelCase = 'optimized_' + model_file_name __UpperCamelCase = os.path.join(__lowercase , __lowercase ) onnx.save(__lowercase , __lowercase ) return new_model
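The `_is_equal_tensor_proto` trick above (blank both names, compare the protos, restore the names) is easy to check in isolation. A small sketch assuming `onnx` and `numpy` are installed; the tensor names are invented for the demo:

import numpy as np
from onnx import numpy_helper

def is_equal_ignoring_name(x, y):
    # Blank both names, compare the full protos, then restore the names.
    name_x, name_y = x.name, y.name
    x.name = y.name = ""
    equal = x == y
    x.name, y.name = name_x, name_y
    return equal

a = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w1")
b = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w2")
print(is_equal_ignoring_name(a, b))  # True: same data, different names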
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer A__ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast A__ = TaTokenizerFast A__ = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """MT5EncoderModel""", """MT5ForConditionalGeneration""", """MT5ForQuestionAnswering""", """MT5Model""", """MT5PreTrainedModel""", """MT5Stack""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys A__ = _LazyModule( __name__, globals()["""__file__"""], _import_structure, extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast}, module_spec=__spec__, )
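The `_LazyModule` constructed at the bottom defers all of those imports until an attribute is first touched. This is not the real Transformers implementation, only a rough sketch of the mechanism; the `fake_mt5`/`json` mapping exists purely for the demonstration:

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to their backing module on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

lazy = LazyModule("fake_mt5", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported only at this point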
"""Quick select: find the value at a given index of the sorted data in O(n) average time."""
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at ``index`` if ``items`` were sorted."""
    # An out-of-range index has no answer.
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    if m <= index < m + count:  # the pivot itself is the answer
        return pivot
    elif m > index:  # must be in the smaller partition
        return quick_select(smaller, index)
    else:  # must be in the larger partition
        return quick_select(larger, index - (m + count))
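With the names restored, a selection is deterministic even though the pivot is random:

items = [7, 1, 5, 3, 9]
print(quick_select(items, 2))  # 5: index 2 of the sorted order [1, 3, 5, 7, 9]
print(quick_select(items, 9))  # None: index out of range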
'''simple docstring''' from typing import List import numpy as np def A__ ( UpperCAmelCase_ ): _UpperCamelCase : Optional[Any] = {key: len(UpperCAmelCase_ ) for key, value in gen_kwargs.items() if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( 'Sharding is ambiguous for this dataset: ' + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n' + '\n'.join(f'\t- key {key} has length {length}' for key, length in lists_lengths.items() ) + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ' + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.' ) ) _UpperCamelCase : Union[str, Any] = max(lists_lengths.values() , default=0 ) return max(1 , UpperCAmelCase_ ) def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : Dict = [] for group_idx in range(UpperCAmelCase_ ): _UpperCamelCase : Tuple = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _UpperCamelCase : Union[str, Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _UpperCamelCase : Union[str, Any] = range(UpperCAmelCase_ , start + num_shards_to_add ) shards_indices_per_group.append(UpperCAmelCase_ ) return shards_indices_per_group def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : List[str] = _number_of_shards_in_gen_kwargs(UpperCAmelCase_ ) if num_shards == 1: return [dict(UpperCAmelCase_ )] else: _UpperCamelCase : str = _distribute_shards(num_shards=UpperCAmelCase_ , max_num_jobs=UpperCAmelCase_ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(UpperCAmelCase_ ) ) ] def A__ ( UpperCAmelCase_ ): return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , UpperCAmelCase_ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : Optional[Any] = {len(UpperCAmelCase_ ) for value in gen_kwargs.values() if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )} _UpperCamelCase : Tuple = {} for size in list_sizes: _UpperCamelCase : str = list(range(UpperCAmelCase_ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _UpperCamelCase : Tuple = dict(UpperCAmelCase_ ) for key, value in shuffled_kwargs.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : Tuple = [value[i] for i in indices_per_size[len(UpperCAmelCase_ )]] return shuffled_kwargs
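The obfuscation collapsed all five function names above to `A__`, but the internal call sites still show that the job-splitting arithmetic lives in `_distribute_shards`. A standalone copy of that arithmetic (renamed `distribute_shards`) makes the rule visible: the first `num_shards % max_num_jobs` jobs each get one extra shard:

def distribute_shards(num_shards, max_num_jobs):
    groups = []
    for group_idx in range(max_num_jobs):
        num_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_to_add == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + num_to_add))
    return groups

print(distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]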
"""Convert a fairseq mBART checkpoint into the Hugging Face Transformers format."""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is a mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()

    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
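`make_linear_from_emb` reuses the embedding matrix as a bias-free output projection by swapping the weight tensor wholesale. A tiny self-contained check of that reuse, with made-up sizes:

import torch
from torch import nn

emb = nn.Embedding(8, 4)              # vocab_size=8, emb_size=4
lin = nn.Linear(8, 4, bias=False)     # same constructor call as above
lin.weight.data = emb.weight.data     # weight is now (8, 4), i.e. (out, in)
logits = lin(emb(torch.tensor([3])))  # (1, 4) @ (8, 4).T -> (1, 8)
print(logits.shape)                   # torch.Size([1, 8])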
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Dict = ["pixel_values"] def __init__( self , __A = True , __A = None , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , **__A , ) -> None: super().__init__(**__A ) lowerCAmelCase_ :Any = size if size is not None else {"""shortest_edge""": 384} lowerCAmelCase_ :List[Any] = get_size_dict(__A , default_to_square=__A ) lowerCAmelCase_ :List[str] = do_resize lowerCAmelCase_ :int = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase_ :Dict = crop_pct if crop_pct is not None else 224 / 256 lowerCAmelCase_ :Tuple = resample lowerCAmelCase_ :List[Any] = do_rescale lowerCAmelCase_ :Optional[Any] = rescale_factor lowerCAmelCase_ :int = do_normalize lowerCAmelCase_ :Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ :Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , __A , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray: lowerCAmelCase_ :List[Any] = get_size_dict(__A , default_to_square=__A ) if "shortest_edge" not in size: raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}""" ) lowerCAmelCase_ :Dict = size["""shortest_edge"""] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase_ :List[Any] = int(shortest_edge / crop_pct ) lowerCAmelCase_ :int = get_resize_output_image_size(__A , size=__A , default_to_square=__A ) lowerCAmelCase_ :Tuple = resize(image=__A , size=__A , resample=__A , data_format=__A , **__A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__A , size=(shortest_edge, shortest_edge) , data_format=__A , **__A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __A , size=(shortest_edge, shortest_edge) , resample=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> Optional[Any]: return rescale(__A , scale=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray: return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image: lowerCAmelCase_ :Any = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ :Dict = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase_ :Optional[int] = resample if resample is not None else self.resample lowerCAmelCase_ :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ :int = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ :int = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ :Tuple = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ :str = image_std if image_std is not None else self.image_std lowerCAmelCase_ :str = size if size is not None else self.size lowerCAmelCase_ :Any = get_size_dict(__A , default_to_square=__A ) lowerCAmelCase_ :int = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. lowerCAmelCase_ :str = [to_numpy_array(__A ) for image in images] if do_resize: lowerCAmelCase_ :List[Any] = [self.resize(image=__A , size=__A , crop_pct=__A , resample=__A ) for image in images] if do_rescale: lowerCAmelCase_ :Any = [self.rescale(image=__A , scale=__A ) for image in images] if do_normalize: lowerCAmelCase_ :str = [self.normalize(image=__A , mean=__A , std=__A ) for image in images] lowerCAmelCase_ :Any = [to_channel_dimension_format(__A , __A ) for image in images] lowerCAmelCase_ :Optional[Any] = {"""pixel_values""": images} return BatchFeature(data=__A , tensor_type=__A )
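The `size < 384` branch above is the classic "resize, then center-crop" evaluation recipe; with the default `crop_pct` of 224/256 the arithmetic reduces to:

size, crop_pct = 224, 224 / 256
resize_edge = int(size / crop_pct)
print(resize_edge)  # 256: resize the shortest edge to 256, then crop to 224x224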
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : Any , __A : Dict , __A : str , __A : List[Any]=1_0_2_4 , __A : Tuple=1_0_2_4 , __A : str=3.6 ): __UpperCamelCase = tokenizer __UpperCamelCase = tokenizer.bos_token_id __UpperCamelCase = dataset __UpperCamelCase = seq_length __UpperCamelCase = seq_length * chars_per_token * num_of_sequences def __iter__( self : Any ): __UpperCamelCase = iter(self.dataset ) __UpperCamelCase = True while more_examples: __UpperCamelCase , __UpperCamelCase = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(__A )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: __UpperCamelCase = False break __UpperCamelCase = tokenizer(__A , truncation=__A )['input_ids'] __UpperCamelCase = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(__A ) , self.seq_length ): __UpperCamelCase = all_token_ids[i : i + self.seq_length] if len(__A ) == self.seq_length: yield torch.tensor(__A ) def lowercase__ ( __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = {'streaming': True} __UpperCamelCase = load_dataset(args.dataset_name , split='train' , **__lowercase ) __UpperCamelCase = ConstantLengthDataset(__lowercase , __lowercase , seq_length=args.seq_length ) __UpperCamelCase = DataLoader(__lowercase , batch_size=args.batch_size ) return eval_dataloader def lowercase__ ( __lowercase : Tuple ) -> Optional[Any]: """simple docstring""" model.eval() __UpperCamelCase = [] for step, batch in enumerate(__lowercase ): with torch.no_grad(): __UpperCamelCase = model(__lowercase , labels=__lowercase ) __UpperCamelCase = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__lowercase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __UpperCamelCase = torch.mean(torch.cat(__lowercase ) ) try: __UpperCamelCase = torch.exp(__lowercase ) except OverflowError: __UpperCamelCase = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator a__ : int =Accelerator() # Parse configuration a__ : Dict =HfArgumentParser(EvaluationArguments) a__ : Union[str, Any] =parser.parse_args() set_seed(args.seed) # Logging a__ : List[Any] =logging.getLogger(__name__) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) # Load model and tokenizer a__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt) a__ : List[Any] =AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader a__ : Union[str, Any] =create_dataloader(args) # Prepare everything with our `accelerator`. a__ , a__ : List[str] =accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('''Evaluating and saving model after training''') a__ , a__ : Any =evaluate(args) logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
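Stripped of the tokenizer and DataLoader plumbing, the packing done by `ConstantLengthDataset` above reduces to: join tokenized texts with a separator id, cut fixed-size windows, and drop the short tail. A pure-Python sketch with invented token ids:

def pack(token_streams, seq_length, concat_id):
    all_ids = []
    for ids in token_streams:
        all_ids.extend(ids + [concat_id])  # separator after every text
    for i in range(0, len(all_ids), seq_length):
        chunk = all_ids[i : i + seq_length]
        if len(chunk) == seq_length:       # drop the incomplete tail
            yield chunk

print(list(pack([[1, 2, 3], [4, 5]], seq_length=3, concat_id=0)))
# [[1, 2, 3], [0, 4, 5]]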
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Tuple = [ "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "SwinForImageClassification", "SwinForMaskedImageModeling", "SwinModel", "SwinPreTrainedModel", "SwinBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Union[str, Any] = [ "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a__ : Any =logging.get_logger(__name__) a__ : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo" SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"] SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ): __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = hidden_size __UpperCamelCase = num_layers __UpperCamelCase = num_heads __UpperCamelCase = intermediate_size __UpperCamelCase = window_size __UpperCamelCase = activation_function __UpperCamelCase = resid_dropout __UpperCamelCase = embed_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = classifier_dropout __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id __UpperCamelCase = attention_types __UpperCamelCase = self.expand_attention_types_params(__A ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=__A , eos_token_id=__A , **__A ) @staticmethod def _lowerCamelCase ( __A : Tuple ): __UpperCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any: """simple docstring""" import torch __UpperCamelCase = input.size() __UpperCamelCase = len(__lowercase ) __UpperCamelCase = shape[dimension] __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase ) __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1 __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None] __UpperCamelCase = [slice(__lowercase )] * rank __UpperCamelCase = indices __UpperCamelCase = input[s] __UpperCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" import torch __UpperCamelCase = torch.arange(1 , __lowercase ) __UpperCamelCase = torch.remainder(__lowercase , __lowercase ) __UpperCamelCase = remainders == 0 __UpperCamelCase = candidates[divisor_indices] __UpperCamelCase = torch.max(__lowercase ) return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' ) class snake_case ( __lowerCamelCase ): """simple docstring""" @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def _lowerCamelCase ( self : int ): return self._config.num_heads def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ): __UpperCamelCase = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs['attention_mask'] if self.use_past: __UpperCamelCase = ordered_inputs['attention_mask'].dtype __UpperCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def _lowerCamelCase ( self : Dict ): return 1_3
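The `expand_attention_types_params` static method above unrolls the compact `attention_types` spec into one entry per layer; a standalone copy makes the expansion visible:

def expand_attention_types_params(attention_types):
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):        # repeat the pattern item[1] times
            attentions.extend(item[0])  # one entry per layer in the pattern
    return attentions

print(expand_attention_types_params([[["global", "local"], 2]]))
# ['global', 'local', 'global', 'local']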
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py lowerCamelCase__ = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ lowerCamelCase__ = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ lowerCamelCase__ = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class A__ ( datasets.Metric): def __lowerCamelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : Optional[Any] = compute_bleu( reference_corpus=_SCREAMING_SNAKE_CASE , translation_corpus=_SCREAMING_SNAKE_CASE , max_order=_SCREAMING_SNAKE_CASE , smooth=_SCREAMING_SNAKE_CASE ) ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Dict = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa" SCREAMING_SNAKE_CASE_ : Dict =( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) SCREAMING_SNAKE_CASE_ : List[str] ="document_qa" SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"] SCREAMING_SNAKE_CASE_ : Any =["text"] def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ): if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*__A , **__A ) def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ): __UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' __UpperCamelCase = task_prompt.replace('{user_input}' , __A ) __UpperCamelCase = self.pre_processor.tokenizer( __A , add_special_tokens=__A , return_tensors='pt' ).input_ids __UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ): return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences def _lowerCamelCase ( self : Tuple , __A : List[Any] ): __UpperCamelCase = self.pre_processor.batch_decode(__A )[0] __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) __UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip() # remove first task start token __UpperCamelCase = self.pre_processor.tokenajson(__A ) return sequence["answer"]
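Everything model-related in the tool above needs Donut weights, but the prompt construction is plain string work and can be checked on its own:

task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
prompt = task_prompt.replace("{user_input}", "What is the invoice total?")
print(prompt)
# <s_docvqa><s_question>What is the invoice total?</s_question><s_answer>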
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase_ ( _lowerCamelCase : Features): lowercase__ : List[Any] = np.inf def set_batch_size(_lowerCamelCase : FeatureType) -> None: nonlocal batch_size if isinstance(_lowerCamelCase , _lowerCamelCase): lowercase__ : Any = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) elif isinstance(_lowerCamelCase , _lowerCamelCase): lowercase__ : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) elif isinstance(_lowerCamelCase , _lowerCamelCase) and feature.dtype == "binary": lowercase__ : Dict = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) _visit(_lowerCamelCase , _lowerCamelCase) return None if batch_size is np.inf else batch_size class snake_case_ ( __A ): def __init__( self : List[str] , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Union[str, Any] , ) -> Union[str, Any]: super().__init__( lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , ) lowercase__ : List[str] = path_or_paths if isinstance(lowercase_ , lowercase_ ) else {self.split: path_or_paths} lowercase__ : Optional[int] = _PACKAGED_DATASETS_MODULES["parquet"][1] lowercase__ : Union[str, Any] = Parquet( cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , ) def __UpperCamelCase ( self : Tuple ) -> Dict: # Build iterable dataset if self.streaming: lowercase__ : List[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase__ : Any = None lowercase__ : int = None lowercase__ : Dict = None lowercase__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , ) lowercase__ : Optional[Any] = self.builder.as_dataset( split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory ) return dataset class snake_case_ : def __init__( self : str , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ) -> int: lowercase__ : Dict = dataset lowercase__ : List[Any] = path_or_buf lowercase__ : Any = batch_size or get_writer_batch_size(dataset.features ) lowercase__ : Any = parquet_writer_kwargs def __UpperCamelCase ( self : List[Any] ) -> int: lowercase__ : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: lowercase__ : str = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs ) else: lowercase__ : Tuple = 
self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs ) return written def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : Optional[int] ) -> int: lowercase__ : List[Any] = 0 lowercase__ : List[Any] = parquet_writer_kwargs.pop("path_or_buf" , lowercase_ ) lowercase__ : List[Any] = self.dataset.features.arrow_schema lowercase__ : Optional[Any] = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_ ) for offset in logging.tqdm( range(0 , len(self.dataset ) , lowercase_ ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): lowercase__ : Any = query_table( table=self.dataset._data , key=slice(lowercase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(lowercase_ ) written += batch.nbytes writer.close() return written
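Stripped of the `datasets` wrapper, the `_write` loop above is a plain `pyarrow.parquet.ParquetWriter` streaming row batches against a fixed schema. A minimal sketch, with an invented file name and columns:

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"model": ["bert", "gpt2"], "score": [0.9, 0.8]})
writer = pq.ParquetWriter("metrics.parquet", table.schema)
for offset in range(0, table.num_rows, 1):  # batch_size=1 for the demo
    writer.write_table(table.slice(offset, 1))
writer.close()
print(pq.read_table("metrics.parquet").num_rows)  # 2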
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """""" a__ = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[DatasetInfo] = None , UpperCamelCase__ : Optional[str] = None , **UpperCamelCase__ : Optional[int] , ) -> str: """simple docstring""" super().__init__(self , **UpperCamelCase__ ) __magic_name__ = repo_info __magic_name__ = token __magic_name__ = None def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" if self.dir_cache is None: __magic_name__ = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __magic_name__ = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(UpperCamelCase__ ): {"""name""": str(UpperCamelCase__ ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def _lowercase ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , **UpperCamelCase__ : Dict , ) -> str: """simple docstring""" if not isinstance(self.repo_info , UpperCamelCase__ ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) __magic_name__ = hf_hub_url(self.repo_info.id , UpperCamelCase__ , revision=self.repo_info.sha ) return fsspec.open( UpperCamelCase__ , mode=UpperCamelCase__ , headers=get_authentication_headers_for_url(UpperCamelCase__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def _lowercase ( self : int , UpperCamelCase__ : Tuple , **UpperCamelCase__ : Any ) -> Any: """simple docstring""" self._get_dirs() __magic_name__ = self._strip_protocol(UpperCamelCase__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=False , **UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" self._get_dirs() __magic_name__ = PurePosixPath(path.strip("""/""" ) ) __magic_name__ = {} for p, f in self.dir_cache.items(): __magic_name__ = PurePosixPath(p.strip("""/""" ) ) __magic_name__ = p.parent if root == path: __magic_name__ = f __magic_name__ = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
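The `_get_dirs` cache above derives directory entries from each file path via `PurePosixPath.parents`, skipping the root. That derivation in isolation:

from pathlib import PurePosixPath

rfilename = "data/train/part-0.parquet"
dir_cache = {rfilename: {"name": rfilename, "size": None, "type": "file"}}
for d in list(PurePosixPath(rfilename).parents)[:-1]:  # drop the '.' root
    dir_cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}
print(sorted(dir_cache))
# ['data', 'data/train', 'data/train/part-0.parquet']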
88
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase__ ( __lowercase : Features ) -> Optional[int]: """simple docstring""" __UpperCamelCase = np.inf def set_batch_size(__lowercase : FeatureType ) -> None: nonlocal batch_size if isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary": __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__lowercase , __lowercase ) return None if batch_size is np.inf else batch_size class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ): super().__init__( __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , ) __UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths} __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] __UpperCamelCase = Parquet( cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , ) def _lowerCamelCase ( self : Optional[int] ): # Build iterable dataset if self.streaming: __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , ) __UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__A , in_memory=self.keep_in_memory ) return dataset class snake_case : """simple docstring""" def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ): __UpperCamelCase = dataset __UpperCamelCase = path_or_buf __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features ) __UpperCamelCase = parquet_writer_kwargs def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: __UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs ) else: __UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs ) return written def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ): __UpperCamelCase = 0 __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A ) __UpperCamelCase = self.dataset.features.arrow_schema 
__UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): __UpperCamelCase = query_table( table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__A ) written += batch.nbytes writer.close() return written
53
0
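# A hypothetical round-trip through the Parquet reader/writer defined above,
# assuming the `datasets` library is installed; `Dataset.to_parquet` and
# `Dataset.from_parquet` delegate to these writer/reader classes internally.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["foo", "bar"], "label": [0, 1]})
ds.to_parquet("example.parquet")                    # wraps the parquet writer
reloaded = Dataset.from_parquet("example.parquet")  # wraps the parquet reader
print(reloaded[0])                                  # {'text': 'foo', 'label': 0}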
'''simple docstring''' import cmath import math def apparent_power( voltage : float , current : float , voltage_angle : float , current_angle : float ) -> complex: voltage_angle_rad : float = math.radians(voltage_angle ) current_angle_rad : float = math.radians(current_angle ) # Convert voltage and current to rectangular form voltage_rect : complex = cmath.rect(voltage , voltage_angle_rad ) current_rect : complex = cmath.rect(current , current_angle_rad ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
89
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( 'split_dict' , [ SplitDict(), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ), SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'train': SplitInfo()} ), ] , ) def lowercase__ ( __lowercase : SplitDict ) -> int: """simple docstring""" __UpperCamelCase = split_dict._to_yaml_list() assert len(__lowercase ) == len(__lowercase ) __UpperCamelCase = SplitDict._from_yaml_list(__lowercase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump __UpperCamelCase = None # the split name of split_dict takes over the name of the split info object __UpperCamelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( 'split_info' , [SplitInfo(), SplitInfo(dataset_name=__lowercase ), SplitInfo(dataset_name='my_dataset' )] ) def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" __UpperCamelCase = asdict(SplitDict({'train': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
0
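# A worked example for the apparent-power helper above; standard library only,
# and the voltage/current values are illustrative.
import cmath
import math

voltage, voltage_angle = 230.0, 0.0   # volts, degrees
current, current_angle = 5.0, -30.0   # amperes, degrees

voltage_rect = cmath.rect(voltage, math.radians(voltage_angle))
current_rect = cmath.rect(current, math.radians(current_angle))
apparent = voltage_rect * current_rect  # complex apparent power in volt-amperes
print(f"S = {apparent:.1f} VA, |S| = {abs(apparent):.1f} VA")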
import os import sys import unittest __A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __A = os.path.join("tests", "models", "bert", "test_modeling_bert.py") __A = os.path.join("tests", "models", "blip", "test_modeling_blip.py") class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = get_test_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = get_test_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = {'BertModelTest': 'BertModelTester'} __lowerCamelCase = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = get_model_to_test_mapping(lowerCamelCase__ ) __lowerCamelCase = get_model_to_test_mapping(lowerCamelCase__ ) __lowerCamelCase = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } __lowerCamelCase = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = get_model_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = get_model_to_tester_mapping(lowerCamelCase__ ) __lowerCamelCase = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } __lowerCamelCase = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) , lowerCamelCase__ )
90
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[str] ={ '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any =[ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys a__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
0
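# A minimal sketch of the lazy-import machinery that `_LazyModule` provides in
# the init file above; this standalone `LazyModule` is illustrative, not the
# transformers API.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the heavy submodule only on first attribute access.
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)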
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *lowercase_ : Tuple , **lowercase_ : Tuple): '''simple docstring''' warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_)
91
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a__ : str =logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str =["input_features", "attention_mask"] def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ): super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A ) __UpperCamelCase = num_mel_bins __UpperCamelCase = do_ceptral_normalize __UpperCamelCase = normalize_means __UpperCamelCase = normalize_vars __UpperCamelCase = True def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ): __UpperCamelCase = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers __UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 ) __UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: __UpperCamelCase = x[:input_length].mean(axis=0 ) __UpperCamelCase = np.subtract(__A , __A ) if normalize_vars: __UpperCamelCase = x[:input_length].std(axis=0 ) __UpperCamelCase = np.divide(__A , __A ) if input_length < x.shape[0]: __UpperCamelCase = padding_value # make sure array is in float32 __UpperCamelCase = x.astype(np.floataa ) return x def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ): __UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__A , __A ) ] def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __UpperCamelCase = is_batched_numpy or ( isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__A , np.ndarray ): __UpperCamelCase = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCamelCase = [raw_speech] # extract fbank features __UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech] # convert into correct format for padding __UpperCamelCase = BatchFeature({'input_features': features} ) __UpperCamelCase = self.pad( __A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , ) # make sure list is in array format __UpperCamelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , __A ): __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features] __UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: __UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __UpperCamelCase = ( np.array(__A , dtype=np.intaa ) if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD else None ) __UpperCamelCase = self.normalize( padded_inputs['input_features'] , attention_mask=__A ) if return_tensors is not None: __UpperCamelCase = padded_inputs.convert_to_tensors(__A ) return padded_inputs
53
0
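# A standalone numpy sketch of the utterance-level cepstral mean and variance
# normalization that `utterance_cmvn` applies above; the epsilon guard against
# zero variance is an assumption, not upstream behavior.
import numpy as np


def cmvn(features: np.ndarray) -> np.ndarray:
    """Zero-mean, unit-variance normalization over the time (frame) axis."""
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / np.maximum(std, 1e-10)


fbank = np.random.randn(100, 80).astype(np.float32)  # (frames, mel bins)
normalized = cmvn(fbank)
print(normalized.mean(axis=0)[:3], normalized.std(axis=0)[:3])  # ~0 and ~1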
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ): # Format the message. if name is None: __lowerCAmelCase = None else: __lowerCAmelCase = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}" __lowerCAmelCase = fmt.format(SCREAMING_SNAKE_CASE_ ) # Print and recurse (if needed). if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if msg is not None: print(SCREAMING_SNAKE_CASE_ ) for k in val.keys(): recursive_print(SCREAMING_SNAKE_CASE_ , val[k] , spaces + 2 ) elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): print(SCREAMING_SNAKE_CASE_ , ":" , val.size() ) else: print(SCREAMING_SNAKE_CASE_ , ":" , SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. 
__lowerCAmelCase = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] __lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:] __lowerCAmelCase = param.view(*SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = param.transpose(0 , 2 ) __lowerCAmelCase = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] __lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:] __lowerCAmelCase = param.view(*SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = param.transpose(0 , 1 ).contiguous() __lowerCAmelCase = param.view(*SCREAMING_SNAKE_CASE_ ) return param def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict ): # The converted output model. __lowerCAmelCase = {} # old versions did not store training args __lowerCAmelCase = input_state_dict.get("args" , SCREAMING_SNAKE_CASE_ ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) __lowerCAmelCase = ds_args.padded_vocab_size __lowerCAmelCase = ds_args.max_position_embeddings __lowerCAmelCase = ds_args.hidden_size __lowerCAmelCase = ds_args.num_layers __lowerCAmelCase = ds_args.num_attention_heads __lowerCAmelCase = ds_args.ffn_hidden_size # pprint(config) # The number of heads. __lowerCAmelCase = config.n_head # The hidden_size per head. __lowerCAmelCase = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): __lowerCAmelCase = input_state_dict["checkpoint_version"] else: __lowerCAmelCase = 0.0 # The model. __lowerCAmelCase = input_state_dict["model"] # The language model. __lowerCAmelCase = model["language_model"] # The embeddings. __lowerCAmelCase = lm["embedding"] # The word embeddings. __lowerCAmelCase = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. __lowerCAmelCase = word_embeddings[: config.vocab_size, :] __lowerCAmelCase = word_embeddings # The position embeddings. __lowerCAmelCase = embeddings["position_embeddings"]["weight"] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] __lowerCAmelCase = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" ) # Store the position embeddings. __lowerCAmelCase = pos_embeddings # The transformer. __lowerCAmelCase = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] # The regex to extract layer names. __lowerCAmelCase = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" ) # The simple map of names for "automated" rules. __lowerCAmelCase = { "attention.dense": ".attn.c_proj.", "self_attention.dense": ".attn.c_proj.", "mlp.dense_h_to_4h": ".mlp.c_fc.", "mlp.dense_4h_to_h": ".mlp.c_proj.", } # Extract the layers. for key, val in transformer.items(): # Match the name. __lowerCAmelCase = layer_re.match(SCREAMING_SNAKE_CASE_ ) # Stop if that's not a layer if m is None: break # The index of the layer. __lowerCAmelCase = int(m.group(1 ) ) # The name of the operation. __lowerCAmelCase = m.group(2 ) # Is it a weight or a bias? __lowerCAmelCase = m.group(3 ) # The name of the layer. __lowerCAmelCase = F"""transformer.h.{layer_idx}""" # For layernorm(s), simply store the layer norm. 
if op_name.endswith("layernorm" ): __lowerCAmelCase = "ln_1" if op_name.startswith("input" ) else "ln_2" __lowerCAmelCase = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. __lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = causal_mask # Insert a "dummy" tensor for masked_bias. __lowerCAmelCase = torch.tensor(-1E4 , dtype=torch.floataa ) __lowerCAmelCase = masked_bias __lowerCAmelCase = fix_query_key_value_ordering(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. __lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous() # Store. __lowerCAmelCase = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": __lowerCAmelCase = fix_query_key_value_ordering(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Store. No change of shape. __lowerCAmelCase = out_val # Transpose the weights. elif weight_or_bias == "weight": __lowerCAmelCase = megatron_to_transformers[op_name] __lowerCAmelCase = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": __lowerCAmelCase = megatron_to_transformers[op_name] __lowerCAmelCase = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. __lowerCAmelCase = transformer["final_layernorm.weight"] __lowerCAmelCase = transformer["final_layernorm.bias"] # For LM head, transformers' wants the matrix to weight embeddings. __lowerCAmelCase = word_embeddings # It should be done! return output_state_dict def _a ( ): # Create the argument parser. __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--print-checkpoint-structure" , action="store_true" ) parser.add_argument( "path_to_checkpoint" , type=SCREAMING_SNAKE_CASE_ , help="Path to the checkpoint file (.zip archive or direct .pt file)" , ) parser.add_argument( "--config_file" , default="" , type=SCREAMING_SNAKE_CASE_ , help="An optional config json file describing the pre-trained model." , ) __lowerCAmelCase = parser.parse_args() # Extract the basename. __lowerCAmelCase = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" ) if args.path_to_checkpoint.endswith(".zip" ): with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint: with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict: __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" ) else: __lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location="cpu" ) __lowerCAmelCase = input_state_dict.get("args" , SCREAMING_SNAKE_CASE_ ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: __lowerCAmelCase = "gelu_fast" elif ds_args.openai_gelu: __lowerCAmelCase = "gelu_new" else: __lowerCAmelCase = "gelu" else: # in the very early days this used to be "gelu_new" __lowerCAmelCase = "gelu_new" # Spell out all parameters in case the defaults change. 
__lowerCAmelCase = GPTaConfig( vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=SCREAMING_SNAKE_CASE_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=SCREAMING_SNAKE_CASE_ , summary_activation=SCREAMING_SNAKE_CASE_ , summary_proj_to_labels=SCREAMING_SNAKE_CASE_ , summary_first_dropout=0.1 , scale_attn_weights=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , ) else: __lowerCAmelCase = GPTaConfig.from_json_file(args.config_file ) __lowerCAmelCase = ["GPT2LMHeadModel"] # Convert. print("Converting" ) __lowerCAmelCase = convert_megatron_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: __lowerCAmelCase = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": __lowerCAmelCase = "gpt2" elif tokenizer_type == "PretrainedFromHF": __lowerCAmelCase = ds_args.tokenizer_name_or_path else: raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" ) else: __lowerCAmelCase = "gpt2" __lowerCAmelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = type(SCREAMING_SNAKE_CASE_ ).__name__ __lowerCAmelCase = tokenizer_class # Store the config to file. print("Saving config" ) config.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Save tokenizer based on args print(F"""Adding {tokenizer_class} tokenizer files""" ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Store the state_dict to file. __lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , "pytorch_model.bin" ) print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" ) torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
92
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : List[Any] =logging.get_logger(__name__) a__ : List[Any] ={ '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model" def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = project_dim class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model" def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ): super().__init__(**__A ) __UpperCamelCase = hidden_size __UpperCamelCase = intermediate_size __UpperCamelCase = projection_dim __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = num_channels __UpperCamelCase = patch_size __UpperCamelCase = image_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = attention_dropout __UpperCamelCase = layer_norm_eps __UpperCamelCase = hidden_act @classmethod def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type' ) == "altclip": __UpperCamelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] ="altclip" SCREAMING_SNAKE_CASE_ : Optional[int] =True def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). __UpperCamelCase = kwargs.pop('text_config_dict' , __A ) __UpperCamelCase = kwargs.pop('vision_config_dict' , __A ) super().__init__(**__A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __UpperCamelCase = {} # This is the complete result when using `text_config_dict`. __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __UpperCamelCase = {} # This is the complete result when using `vision_config_dict`. __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __UpperCamelCase = { str(__A ): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __UpperCamelCase = {} logger.info('`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values.' ) if vision_config is None: __UpperCamelCase = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' ) __UpperCamelCase = AltCLIPTextConfig(**__A ) __UpperCamelCase = AltCLIPVisionConfig(**__A ) __UpperCamelCase = projection_dim __UpperCamelCase = logit_scale_init_value __UpperCamelCase = 1.0 @classmethod def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = copy.deepcopy(self.__dict__ ) __UpperCamelCase = self.text_config.to_dict() __UpperCamelCase = self.vision_config.to_dict() __UpperCamelCase = self.__class__.model_type return output
53
0
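# A toy illustration of the checkpoint_version >= 2.0 branch of
# `fix_query_key_value_ordering` in the conversion script above; torch is
# assumed and the sizes are arbitrary.
import torch

num_heads, num_splits, head_dim, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, cols)
input_shape = param.size()

reordered = (
    param.view(num_heads, num_splits, head_dim, cols)  # split the fused leading dim
    .transpose(0, 1)                                   # -> [num_splits, num_heads, ...]
    .contiguous()
    .view(*input_shape)                                # back to the original 2-D shape
)
print(reordered.shape)  # torch.Size([24, 5])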
'''simple docstring''' from __future__ import annotations import requests def get_hackernews_story( story_id : int ) -> dict: """simple docstring""" url : str = F'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(url ).json() def hackernews_top_stories( max_stories : int = 10 ) -> list[dict]: """simple docstring""" url : str = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' story_ids : list[int] = requests.get(url ).json()[:max_stories] return [get_hackernews_story(story_id ) for story_id in story_ids] def hackernews_top_stories_as_markdown( max_stories : int = 10 ) -> str: """simple docstring""" stories : list[dict] = hackernews_top_stories(max_stories ) return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
93
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Any ) -> Optional[Any]: """simple docstring""" with open(__lowercase ) as metadata_file: __UpperCamelCase = json.load(__lowercase ) __UpperCamelCase = LukeConfig(use_entity_aware_attention=__lowercase , **metadata['model_config'] ) # Load in the weights from the checkpoint_path __UpperCamelCase = torch.load(__lowercase , map_location='cpu' ) # Load the entity vocab file __UpperCamelCase = load_entity_vocab(__lowercase ) __UpperCamelCase = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks __UpperCamelCase = AddedToken('<ent>' , lstrip=__lowercase , rstrip=__lowercase ) __UpperCamelCase = AddedToken('<ent2>' , lstrip=__lowercase , rstrip=__lowercase ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__lowercase ) with open(os.path.join(__lowercase , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(__lowercase , __lowercase ) __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase ) # Initialize the embeddings of the special tokens __UpperCamelCase = state_dict['embeddings.word_embeddings.weight'] __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 ) __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 ) __UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __UpperCamelCase = F'''encoder.layer.{layer_index}.attention.self.''' __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight'] __UpperCamelCase = entity_emb[entity_vocab['[MASK]']] __UpperCamelCase = LukeModel(config=__lowercase ).eval() __UpperCamelCase , __UpperCamelCase = model.load_state_dict(__lowercase , strict=__lowercase ) if not (len(__lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'''Missing keys {', '.join(__lowercase )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )): raise ValueError( 'Unexpected keys' F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' ) # Check outputs __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase , task='entity_classification' ) __UpperCamelCase = ( 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the' ' new world number one avoid a humiliating second- round exit at Wimbledon .' 
) __UpperCamelCase = (39, 42) __UpperCamelCase = tokenizer(__lowercase , entity_spans=[span] , add_prefix_space=__lowercase , return_tensors='pt' ) __UpperCamelCase = model(**__lowercase ) # Verify word hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 42, 1024) ) __UpperCamelCase = torch.tensor( [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] ) else: # base __UpperCamelCase = torch.Size((1, 42, 768) ) __UpperCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 1, 1024) ) __UpperCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] ) else: # base __UpperCamelCase = torch.Size((1, 1, 768) ) __UpperCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(__lowercase ) ) model.save_pretrained(__lowercase ) def lowercase__ ( __lowercase : Dict ) -> List[str]: """simple docstring""" __UpperCamelCase = {} with open(__lowercase , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(__lowercase ): __UpperCamelCase , __UpperCamelCase = line.rstrip().split('\t' ) __UpperCamelCase = index return entity_vocab if __name__ == "__main__": a__ : Any =argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) a__ : str =parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
53
0
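# A self-contained sketch of the TSV entity-vocab parsing that
# `load_entity_vocab` performs in the LUKE conversion script above; the sample
# rows here are made up for illustration.
import io

sample = "[PAD]\t0\n[UNK]\t1\n[MASK]\t2\nAna Ivanovic\t3\n"

entity_vocab = {}
for index, line in enumerate(io.StringIO(sample)):
    entity, _ = line.rstrip().split("\t")  # entity name, original id (ignored)
    entity_vocab[entity] = index           # re-index by line position
print(entity_vocab["[MASK]"])  # 2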
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = (DDPMParallelScheduler,) def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ): a :List[Any] = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**_lowerCamelCase ) return config def SCREAMING_SNAKE_CASE__ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): self.check_over_configs(thresholding=_lowerCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , ) def SCREAMING_SNAKE_CASE__ ( self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): for t in [0, 500, 999]: self.check_over_forward(time_step=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Any = self.scheduler_classes[0] a :Optional[Any] = self.get_scheduler_config() a :Any = scheduler_class(**_lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = self.scheduler_classes[0] a :Any = self.get_scheduler_config() a :Union[str, Any] = scheduler_class(**_lowerCamelCase ) a :Dict = len(_lowerCamelCase ) a :Tuple = self.dummy_model() a :Optional[Any] = self.dummy_sample_deter a :str = self.dummy_sample_deter + 0.1 a :Optional[Any] = self.dummy_sample_deter - 0.1 a :int = samplea.shape[0] a :List[str] = torch.stack([samplea, samplea, samplea] , dim=0 ) a :List[str] = torch.arange(_lowerCamelCase )[0:3, None].repeat(1 , _lowerCamelCase ) a :str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) a :List[Any] = scheduler.batch_step_no_noise(_lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) a :str = torch.sum(torch.abs(_lowerCamelCase ) ) a :str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1153.1833 ) < 1e-2 assert abs(result_mean.item() - 0.5005 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.scheduler_classes[0] a :Any = self.get_scheduler_config() a :Dict = scheduler_class(**_lowerCamelCase ) a :Tuple = len(_lowerCamelCase ) a :Optional[int] = self.dummy_model() a :Optional[Any] = self.dummy_sample_deter a :Optional[int] = torch.manual_seed(0 ) for t in reversed(range(_lowerCamelCase ) ): # 1. 
predict noise residual a :Dict = model(_lowerCamelCase , _lowerCamelCase ) # 2. predict previous mean of sample x_t-1 a :int = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample a :Any = pred_prev_sample a :int = torch.sum(torch.abs(_lowerCamelCase ) ) a :Dict = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 258.9606 ) < 1e-2 assert abs(result_mean.item() - 0.3372 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = self.scheduler_classes[0] a :List[Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) a :Any = scheduler_class(**_lowerCamelCase ) a :Tuple = len(_lowerCamelCase ) a :str = self.dummy_model() a :List[str] = self.dummy_sample_deter a :Any = torch.manual_seed(0 ) for t in reversed(range(_lowerCamelCase ) ): # 1. predict noise residual a :Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) # 2. predict previous mean of sample x_t-1 a :str = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample a :List[str] = pred_prev_sample a :Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) ) a :Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 202.0296 ) < 1e-2 assert abs(result_mean.item() - 0.2631 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self ): a :Union[str, Any] = self.scheduler_classes[0] a :str = self.get_scheduler_config() a :Tuple = scheduler_class(**_lowerCamelCase ) a :int = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_lowerCamelCase ) a :List[str] = scheduler.timesteps for i, timestep in enumerate(_lowerCamelCase ): if i == len(_lowerCamelCase ) - 1: a :str = -1 else: a :List[str] = timesteps[i + 1] a :Union[str, Any] = scheduler.previous_timestep(_lowerCamelCase ) a :List[str] = prev_t.item() self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[int] = self.scheduler_classes[0] a :List[Any] = self.get_scheduler_config() a :Dict = scheduler_class(**_lowerCamelCase ) a :int = [100, 87, 50, 51, 0] with self.assertRaises(_lowerCamelCase , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :Tuple = self.scheduler_classes[0] a :Any = self.get_scheduler_config() a :str = scheduler_class(**_lowerCamelCase ) a :Tuple = [100, 87, 50, 1, 0] a :Optional[Any] = len(_lowerCamelCase ) with self.assertRaises(_lowerCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self ): a :List[str] = self.scheduler_classes[0] a :Optional[Any] = self.get_scheduler_config() a :Dict = scheduler_class(**_lowerCamelCase ) a :int = [scheduler.config.num_train_timesteps] with self.assertRaises( _lowerCamelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=_lowerCamelCase )
94
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case ( __lowerCamelCase ): """simple docstring""" def _lowerCamelCase ( self : Any ): __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = 8 # DPR tok __UpperCamelCase = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __UpperCamelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) ) __UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase = {'unk_token': '<unk>'} __UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__A ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__A ) ) def _lowerCamelCase ( self : Tuple ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Optional[int] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Union[str, Any] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def _lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , 
question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _lowerCamelCase ( self : Any , __A : bool ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , ) if from_disk: __UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' ) __UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' ) dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) ) dataset.drop_index('embeddings' ) dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) ) del dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , ) return retriever def _lowerCamelCase ( self : int ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' ) dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' ) pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) ) __UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' ) __UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset} pickle.dump(__A , open(__A , 'wb' ) ) __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , ) __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with 
patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = self.get_dummy_dataset() retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_legacy_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) 
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] ) self.assertEqual(len(doc_dicts[0]['text'] ) , __A ) self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Optional[Any] ): import torch __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , np.ndarray ) __UpperCamelCase = retriever( __A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841 out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], out['doc_ids'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer() __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) retriever.set_ctx_encoder_tokenizer(__A ) __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) self.assertEqual( len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
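A minimal, readable sketch of the FAISS-backed retrieval that the test methods above exercise; the toy dataset and vector size are illustrative, assuming `datasets` and `faiss` are installed:

import numpy as np
import faiss
from datasets import Dataset

vector_size = 8  # illustrative, mirrors the tests' retrieval_vector_size
dataset = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "embeddings": [np.ones(vector_size), 2 * np.ones(vector_size)],
    }
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

query = np.ones(vector_size, dtype=np.float32)
scores, examples = dataset.get_nearest_examples("embeddings", query, k=1)
print(examples["id"])  # ['1'] -- the all-twos embedding maximises the inner product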
53
0
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : Any = logging.get_logger() @dataclass class __lowerCAmelCase : _lowercase : nn.Module _lowercase : List[nn.Module] = field(default_factory=UpperCamelCase__) _lowercase : list = field(default_factory=UpperCamelCase__) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' a__ : Tuple =len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowerCAmelCase__ ) def __call__( self , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowerCAmelCase__ ) [x.remove() for x in self.handles] return self @property def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __lowerCAmelCase : _lowercase : nn.Module _lowercase : nn.Module _lowercase : int = 0 _lowercase : List = field(default_factory=UpperCamelCase__) _lowercase : List = field(default_factory=UpperCamelCase__) def __call__( self , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' a__ : Any =Tracker(self.dest )(lowerCAmelCase__ ).parametrized a__ : List[str] =Tracker(self.src )(lowerCAmelCase__ ).parametrized a__ : Tuple =list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) ) a__ : Any =list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise Exception( F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while''' F''' destination module has {len(lowerCAmelCase__ )}.''' ) for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : ResNetConfig , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" print(f'''Converting {name}...''' ) with torch.no_grad(): a__ : Tuple =timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE ).eval() a__ : str =ResNetForImageClassification(SCREAMING_SNAKE_CASE ).eval() a__ : List[str] =ModuleTransfer(src=SCREAMING_SNAKE_CASE , dest=SCREAMING_SNAKE_CASE ) a__ : Optional[Any] =torch.randn((1, 3, 224, 224) ) module_transfer(SCREAMING_SNAKE_CASE ) assert torch.allclose(from_model(SCREAMING_SNAKE_CASE ) , our_model(SCREAMING_SNAKE_CASE ).logits ), "The model logits don't match the original one." 
a__ : Union[str, Any] =f'''resnet{"-".join(name.split("resnet" ) )}''' print(SCREAMING_SNAKE_CASE ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE , ) # we can use the convnext one a__ : Optional[int] =AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE , ) print(f'''Pushed {checkpoint_name}''' ) def _A ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" a__ : int ="imagenet-1k-id2label.json" a__ : List[str] =1_000 a__ : List[Any] =(1, num_labels) a__ : str ="huggingface/label-files" a__ : Optional[int] =num_labels a__ : Union[str, Any] =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) a__ : str ={int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} a__ : int =idalabel a__ : Dict ={v: k for k, v in idalabel.items()} a__ : str =partial(SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE ) a__ : Optional[Any] ={ "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(SCREAMING_SNAKE_CASE , names_to_config[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, expected_shape if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) UpperCAmelCase : str = parser.parse_args() UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
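The conversion above hinges on tracing the parametrized leaf modules of two architecturally equivalent networks in forward order and copying their state dicts across. A self-contained sketch of that idea, with toy modules standing in for the timm and HF ResNets:

import torch
import torch.nn as nn

def trace_leaves(module, x):
    # record parametrized leaf modules in the order they run
    traced = []
    handles = [
        m.register_forward_hook(lambda m, inp, out: traced.append(m))
        for m in module.modules()
        if len(list(m.children())) == 0
    ]
    module(x)
    for h in handles:
        h.remove()
    return [m for m in traced if len(m.state_dict()) > 0]

src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
dest = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
x = torch.randn(1, 3, 32, 32)
for s, d in zip(trace_leaves(src, x), trace_leaves(dest, x)):
    d.load_state_dict(s.state_dict())
src.eval(); dest.eval()
assert torch.allclose(src(x), dest(x))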
95
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
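The file above is an instance of the transformers lazy-import pattern: heavy submodules are only imported when one of their names is first accessed. A minimal sketch of the same idea for a package `__init__.py`, using PEP 562's module-level `__getattr__` rather than the real `_LazyModule`; the mapping here is illustrative:

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}  # illustrative mapping

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")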
53
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowercase__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""NllbTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""NllbTokenizerFast"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
96
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any]=False ) -> Tuple: """simple docstring""" try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value a__ : str =parse_flag_from_env('''RUN_SLOW''', default=False) a__ : Union[str, Any] =parse_flag_from_env('''RUN_REMOTE''', default=False) a__ : List[str] =parse_flag_from_env('''RUN_LOCAL''', default=True) a__ : Optional[int] =parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression a__ : Any =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') a__ : Optional[int] =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') a__ : List[str] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio a__ : Any =pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam a__ : Tuple =pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility a__ : Union[str, Any] =pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows a__ : int =pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase__ ( __lowercase : Optional[Any] ) -> Optional[int]: """simple docstring""" try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires faiss' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Any: """simple docstring""" try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires regex' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Tuple ) -> List[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires elasticsearch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires sqlalchemy' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[str] ) -> List[str]: """simple docstring""" if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip('test requires PyTorch' )(__lowercase ) return test_case def 
lowercase__ ( __lowercase : Optional[Any] ) -> List[str]: """simple docstring""" if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip('test requires TensorFlow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : int ) -> Union[str, Any]: """simple docstring""" if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip('test requires JAX' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> Optional[Any]: """simple docstring""" if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip('test requires Pillow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : int ) -> int: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> int: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> Any: """simple docstring""" def _require_spacy_model(__lowercase : Any ): try: import spacy # noqa F401 spacy.load(__lowercase ) except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(__lowercase ) )(__lowercase ) else: return test_case return _require_spacy_model def lowercase__ ( __lowercase : Union[str, Any] ) -> str: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Optional[Any]: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip('test is slow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip('test is local' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip('test is packaged' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Any: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip('test requires remote' )(__lowercase ) return test_case def lowercase__ ( *__lowercase : Optional[Any] ) -> Tuple: """simple docstring""" def decorate(cls : int ): for name, fn in cls.__dict__.items(): if callable(__lowercase ) and name.startswith('test' ): for decorator in decorators: __UpperCamelCase = decorator(__lowercase ) setattr(cls , __lowercase , __lowercase ) return cls return decorate class snake_case ( __lowerCamelCase ): """simple docstring""" pass class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =0 SCREAMING_SNAKE_CASE_ : List[Any] =1 SCREAMING_SNAKE_CASE_ : Union[str, Any] =2 @contextmanager def lowercase__ ( 
__lowercase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , __lowercase : Dict=1e-16 ) -> List[Any]: """simple docstring""" __UpperCamelCase = requests.Session().request def timeout_request(__lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , **__lowercase : List[str] ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) __UpperCamelCase = timeout try: return online_request(__lowercase , __lowercase , **__lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__lowercase : int , __lowercase : List[str] , **__lowercase : Union[str, Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=__lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , __lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , __lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , __lowercase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def lowercase__ ( *__lowercase : Any , **__lowercase : Dict ) -> Dict: """simple docstring""" __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__lowercase , **__lowercase ) as tmp_dir: try: os.chdir(__lowercase ) yield finally: os.chdir(__lowercase ) @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase__ ( __lowercase : List[str] , __lowercase : int ) -> Union[str, Any]: """simple docstring""" return deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__lowercase : List[Any] , *__lowercase : Tuple , **__lowercase : Union[str, Any] ): try: return func(*__lowercase , **__lowercase ) except HTTPError as err: if str(__lowercase ).startswith('500' ) or str(__lowercase ).startswith('502' ): pytest.xfail(str(__lowercase ) ) raise err return decorator.decorator(_wrapper , __lowercase ) class snake_case : """simple docstring""" def __init__( self : int , __A : Any , __A : str , __A : List[Any] ): __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def lowercase__ ( __lowercase : Any , __lowercase : Optional[int] ) -> str: """simple docstring""" while True: __UpperCamelCase = await stream.readline() if line: callback(__lowercase ) else: break async def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : Any=None , __lowercase : Optional[Any]=None , __lowercase : int=False , __lowercase : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(__lowercase ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Tuple="" ): __UpperCamelCase = line.decode('utf-8' ).rstrip() sink.append(__lowercase ) if not quiet: print(__lowercase , __lowercase , file=__lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __lowercase : tee(__lowercase , __lowercase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda __lowercase : tee(__lowercase , __lowercase , sys.stderr , label='stderr:' ) ), ] , timeout=__lowercase , ) return _RunOutput(await p.wait() , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : int=180 , __lowercase : int=False , __lowercase : str=True ) -> _RunOutput: """simple docstring""" __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) ) __UpperCamelCase = ' '.join(__lowercase ) if result.returncode > 0: __UpperCamelCase = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCamelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __UpperCamelCase = re.sub(R'^gw' , '' , __lowercase , 0 , re.M ) return int(__lowercase ) def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = 29500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
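A condensed sketch of the env-flag gating used throughout the helpers above: a boolean is read once from the environment and turned into a conditional `unittest.skip` decorator. The truthiness check is simplified relative to `distutils.util.strtobool`:

import os
import unittest

def parse_flag_from_env(key, default=False):
    value = os.environ.get(key)
    if value is None:
        return default
    return value.lower() in ("1", "true", "yes")  # simplified vs. strtobool

_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)

def slow(test_case):
    return test_case if _run_slow_tests else unittest.skip("test is slow")(test_case)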
53
0
'''simple docstring'''
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
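The `make_linear_from_emb` helper above is the standard weight-tying trick: the token-embedding matrix doubles as the LM head's projection, so logits are computed against the same vectors used to embed tokens. A tiny standalone illustration:

import torch.nn as nn

emb = nn.Embedding(100, 16)            # (vocab_size, d_model)
lm_head = nn.Linear(16, 100, bias=False)
lm_head.weight.data = emb.weight.data  # logits = hidden @ emb.weight.T
assert lm_head.weight.shape == (100, 16)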
97
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow verbosity

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
53
0
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = ["image_processor", "tokenizer"] snake_case__ = "Pix2StructImageProcessor" snake_case__ = ("T5Tokenizer", "T5TokenizerFast") def __init__( self : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ): UpperCAmelCase__ = False super().__init__(lowerCamelCase__ ,lowerCamelCase__ ) def __call__( self : Optional[int] ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = 2_048 ,lowerCamelCase__ : int = 0 ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCamelCase__ : Union[str, Any] ,): if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: UpperCAmelCase__ = self.tokenizer UpperCAmelCase__ = self.tokenizer( text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values UpperCAmelCase__ = self.image_processor( lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,max_patches=lowerCamelCase__ ,**lowerCamelCase__ ) else: # add pixel_values and bbox UpperCAmelCase__ = self.image_processor( lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,max_patches=lowerCamelCase__ ,header_text=lowerCamelCase__ ,**lowerCamelCase__ ) if text is not None and not self.image_processor.is_vqa: UpperCAmelCase__ = self.tokenizer( text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,) if "attention_mask" in text_encoding: UpperCAmelCase__ = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: UpperCAmelCase__ = text_encoding.pop('input_ids' ) else: UpperCAmelCase__ = None if text_encoding is not None: 
encoding_image_processor.update(lowerCamelCase__ ) return encoding_image_processor def __lowerCAmelCase ( self : Optional[int] ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Union[str, Any] ): return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ ) def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Tuple ): return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ ) @property def __lowerCAmelCase ( self : Dict ): UpperCAmelCase__ = self.tokenizer.model_input_names UpperCAmelCase__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
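A hedged usage sketch for a processor of this shape; the checkpoint id is an assumption and loading it requires network access:

from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")  # assumed checkpoint id
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="a caption", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. flattened_patches, attention_mask, decoder_* keys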
98
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]: """simple docstring""" if split_mlp_wi: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __UpperCamelCase = (wi_a, wi_a) else: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str: """simple docstring""" return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) __UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , __lowercase ) __UpperCamelCase = collections.OrderedDict() # Shared embeddings. __UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , __lowercase , 'encoder' ).T __UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'encoder' ).T __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 2 (MLP). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T __UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int: """simple docstring""" __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) __UpperCamelCase = state_dict['shared.weight'] return state_dict def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase ) __UpperCamelCase = convert_tax_to_pytorch( __lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase ) __UpperCamelCase = make_state_dict(__lowercase , __lowercase ) model.load_state_dict(__lowercase , strict=__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]: """simple docstring""" __UpperCamelCase = MTaConfig.from_json_file(__lowercase ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCamelCase = UMTaEncoderModel(__lowercase ) else: __UpperCamelCase = UMTaForConditionalGeneration(__lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(__lowercase ) print('Done' ) if __name__ == "__main__": a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) a__ : List[str] =parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
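Much of the bookkeeping above is transposition: Flax `Dense` kernels are stored as (in_features, out_features), while `torch.nn.Linear` weights are (out_features, in_features). The core step in isolation, using the same `np.ascontiguousarray` trick the script applies before handing arrays to torch:

import numpy as np
import torch
import torch.nn as nn

flax_kernel = np.random.randn(512, 1024).astype(np.float32)  # (in, out)
linear = nn.Linear(512, 1024, bias=False)
linear.weight.data = torch.from_numpy(np.ascontiguousarray(flax_kernel.T))  # (out, in)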
53
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
99
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : List[Any] ="BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Optional[int] =("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , __A : Optional[int] , __A : List[Any] ): __UpperCamelCase = False super().__init__(__A , __A ) __UpperCamelCase = self.image_processor def __call__( self : List[Any] , __A : ImageInput = None , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ): if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: __UpperCamelCase = self.tokenizer __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding # add pixel_values __UpperCamelCase = self.image_processor(__A , return_tensors=__A ) if text is not None: __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) else: __UpperCamelCase = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def _lowerCamelCase ( self : List[Any] , *__A : Dict , **__A : Optional[int] ): return self.tokenizer.batch_decode(*__A , **__A ) def _lowerCamelCase ( self : List[Any] , *__A : List[str] , **__A : Dict ): return self.tokenizer.decode(*__A , **__A ) @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.tokenizer.model_input_names __UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
53
0
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __magic_name__ = logging.get_logger(__name__) __magic_name__ = Dict[str, Any] __magic_name__ = List[Prediction] @add_end_docstrings(__a ) class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__): super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self , """vision""") self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())) def snake_case_ ( self , **lowerCAmelCase__): __SCREAMING_SNAKE_CASE = {} if "threshold" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__): return super().__call__(*lowerCAmelCase__ , **lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = load_image(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = torch.IntTensor([[image.height, image.width]]) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors="""pt""") if self.tokenizer is not None: __SCREAMING_SNAKE_CASE = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""") __SCREAMING_SNAKE_CASE = target_size return inputs def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = model_inputs.pop("""target_size""") __SCREAMING_SNAKE_CASE = self.model(**lowerCAmelCase__) __SCREAMING_SNAKE_CASE = outputs.__class__({"""target_size""": target_size, **outputs}) if self.tokenizer is not None: __SCREAMING_SNAKE_CASE = model_inputs["""bbox"""] return model_outputs def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=0.9): __SCREAMING_SNAKE_CASE = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = target_size[0].tolist() def unnormalize(lowerCAmelCase__): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_0_0_0), (height * bbox[1] / 1_0_0_0), (width * bbox[2] / 1_0_0_0), (height * bbox[3] / 1_0_0_0), ])) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = model_outputs["""logits"""].squeeze(0).softmax(dim=-1).max(dim=-1) __SCREAMING_SNAKE_CASE = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] __SCREAMING_SNAKE_CASE = [unnormalize(lowerCAmelCase__) for bbox in model_outputs["""bbox"""].squeeze(0)] __SCREAMING_SNAKE_CASE = ["""score""", """label""", """box"""] __SCREAMING_SNAKE_CASE = [dict(zip(lowerCAmelCase__ , lowerCAmelCase__)) for vals in zip(scores.tolist() , lowerCAmelCase__ , lowerCAmelCase__) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel __SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = raw_annotations[0] __SCREAMING_SNAKE_CASE = raw_annotation["""scores"""] __SCREAMING_SNAKE_CASE = raw_annotation["""labels"""] __SCREAMING_SNAKE_CASE = raw_annotation["""boxes"""] __SCREAMING_SNAKE_CASE = scores.tolist() __SCREAMING_SNAKE_CASE = [self.model.config.idalabel[label.item()] for label in labels] __SCREAMING_SNAKE_CASE = [self._get_bounding_box(lowerCAmelCase__) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] __SCREAMING_SNAKE_CASE = ["""score""", """label""", """box"""] __SCREAMING_SNAKE_CASE = [ dict(zip(lowerCAmelCase__ , lowerCAmelCase__)) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""]) ] return annotation def snake_case_ ( self , lowerCAmelCase__): if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""") __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = box.int().tolist() __SCREAMING_SNAKE_CASE = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
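A worked sketch of the box unnormalisation in the LayoutLM branch above: OCR boxes live on a 0-1000 grid and are scaled back to pixel coordinates.

def unnormalize(bbox, width, height):
    return [
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    ]

print(unnormalize([100, 200, 300, 400], width=640, height=480))  # [64.0, 96.0, 192.0, 192.0]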
100
'''simple docstring'''
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node = Node(6)
    root_node.next_node.next_node.next_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
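The `visited` list above costs O(n) extra memory and O(n^2) time. Floyd's tortoise-and-hare detection performs the same check in O(1) extra space; a sketch reusing the `Node` class defined above:

def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:  # the fast pointer laps the slow one only inside a cycle
            return True
    return False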
53
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) lowercase__ :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name lowercase__ :Optional[Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n" def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=8 ): '''simple docstring''' lowercase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowercase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowercase ( SCREAMING_SNAKE_CASE__ ): def __init__( self ,A__ ,A__ ,A__ ,): super().__init__() self.register_modules( unet=A__ ,scheduler=A__ ,movq=A__ ,) lowercase = 2 ** (len(self.movq.config.block_out_channels) - 1) def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__): if latents is None: lowercase = randn_tensor(A__ ,generator=A__ ,device=A__ ,dtype=A__) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}') lowercase = latents.to(A__) lowercase = latents * scheduler.init_noise_sigma return latents def A__ ( self ,A__=0): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''') lowercase = torch.device(f'cuda:{gpu_id}') lowercase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A__ ,A__) def A__ ( self ,A__=0): if is_accelerate_available() and is_accelerate_version('''>=''' ,'''0.17.0.dev0'''): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''') lowercase = torch.device(f'cuda:{gpu_id}') if self.device.type != "cpu": self.to('''cpu''' ,silence_dtype_warnings=A__) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowercase = None for cpu_offloaded_model in [self.unet, self.movq]: lowercase , lowercase = cpu_offload_with_hook(A__ ,A__ ,prev_module_hook=A__) # We'll offload the last model manually. 
lowercase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def A__ ( self): if not hasattr(self.unet ,'''_hf_hook'''): return self.device for module in self.unet.modules(): if ( hasattr(A__ ,'''_hf_hook''') and hasattr(module._hf_hook ,'''execution_device''') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() @replace_example_docstring(A__) def __call__( self ,A__ ,A__ ,A__ = 5_1_2 ,A__ = 5_1_2 ,A__ = 1_0_0 ,A__ = 4.0 ,A__ = 1 ,A__ = None ,A__ = None ,A__ = "pil" ,A__ = True ,): lowercase = self._execution_device lowercase = guidance_scale > 1.0 if isinstance(A__ ,A__): lowercase = torch.cat(A__ ,dim=0) lowercase = image_embeds.shape[0] * num_images_per_prompt if isinstance(A__ ,A__): lowercase = torch.cat(A__ ,dim=0) if do_classifier_free_guidance: lowercase = image_embeds.repeat_interleave(A__ ,dim=0) lowercase = negative_image_embeds.repeat_interleave(A__ ,dim=0) lowercase = torch.cat([negative_image_embeds, image_embeds] ,dim=0).to(dtype=self.unet.dtype ,device=A__) self.scheduler.set_timesteps(A__ ,device=A__) lowercase = self.scheduler.timesteps lowercase = self.unet.config.in_channels lowercase , lowercase = downscale_height_and_width(A__ ,A__ ,self.movq_scale_factor) # create initial latent lowercase = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,A__ ,A__ ,A__ ,self.scheduler ,) for i, t in enumerate(self.progress_bar(A__)): # expand the latents if we are doing classifier free guidance lowercase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents lowercase = {'''image_embeds''': image_embeds} lowercase = self.unet( sample=A__ ,timestep=A__ ,encoder_hidden_states=A__ ,added_cond_kwargs=A__ ,return_dict=A__ ,)[0] if do_classifier_free_guidance: lowercase , lowercase = noise_pred.split(latents.shape[1] ,dim=1) lowercase , lowercase = noise_pred.chunk(2) lowercase , lowercase = variance_pred.chunk(2) lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowercase = torch.cat([noise_pred, variance_pred_text] ,dim=1) if not ( hasattr(self.scheduler.config ,'''variance_type''') and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowercase , lowercase = noise_pred.split(latents.shape[1] ,dim=1) # compute the previous noisy sample x_t -> x_t-1 lowercase = self.scheduler.step( A__ ,A__ ,A__ ,generator=A__ ,)[0] # post-processing lowercase = self.movq.decode(A__ ,force_not_quantize=A__)['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}') if output_type in ["np", "pil"]: lowercase = image * 0.5 + 0.5 lowercase = image.clamp(0 ,1) lowercase = image.cpu().permute(0 ,2 ,3 ,1).float().numpy() if output_type == "pil": lowercase = self.numpy_to_pil(A__) if not return_dict: return (image,) return ImagePipelineOutput(images=A__)
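# Editor's sketch: a standalone copy of the latent-size helper the pipeline
# above defines (invoked at its call site as `downscale_height_and_width` with
# the movq scale factor), with worked numbers so the rounding is easy to
# verify. This block is illustrative only.
def _downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# 768 x 768 with scale factor 8: 768 // 64 = 12 exactly, so the latents are 96 x 96.
assert _downscale_height_and_width(768, 768) == (96, 96)
# 700 is not a multiple of 64: 700 // 64 = 10 remainder 60, rounded up to 11 -> 88.
assert _downscale_height_and_width(700, 700) == (88, 88)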
# Size of the alphabet used as the base of the polynomial hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
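# Editor's sketch: the rolling-hash update in rabin_karp, worked by hand with
# small illustrative constants (base 10, modulus 97 -- not the module's values).
# Sliding the window one position right removes the leading character's
# contribution (ord(old) * base**(window_len - 1)), shifts, and appends the new
# character, all modulo `mod`.
base, mod = 10, 97


def _window_hash(s):
    h = 0
    for ch in s:
        h = (ord(ch) + h * base) % mod
    return h


old_hash = _window_hash("ab")
power = base ** (2 - 1) % mod  # base**(window_len - 1)
rolled = ((old_hash - ord("a") * power) * base + ord("c")) % mod  # roll "ab" -> "bc"
assert rolled == _window_hash("bc")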
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule SCREAMING_SNAKE_CASE : Optional[int] = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring''' from __future__ import annotations class snake_case : """simple docstring""" def __init__( self : Optional[int] , __A : list[list[int]] ): __UpperCamelCase = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(__A ) != 0: __UpperCamelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(__A ) != cols: raise error for value in row: if not isinstance(__A , (int, float) ): raise error __UpperCamelCase = rows else: __UpperCamelCase = [] def _lowerCamelCase ( self : int ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowerCamelCase ( self : str ): return len(self.rows ) @property def _lowerCamelCase ( self : Any ): return len(self.rows[0] ) @property def _lowerCamelCase ( self : Optional[Any] ): return (self.num_rows, self.num_columns) @property def _lowerCamelCase ( self : Dict ): return self.order[0] == self.order[1] def _lowerCamelCase ( self : Any ): __UpperCamelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Any ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowerCamelCase ( self : List[str] ): return bool(self.determinant() ) def _lowerCamelCase ( self : Dict , __A : int , __A : int ): __UpperCamelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(__A ).determinant() def _lowerCamelCase ( self : Dict , __A : int , __A : int ): if (row + column) % 2 == 0: return self.get_minor(__A , __A ) return -1 * self.get_minor(__A , __A ) def _lowerCamelCase ( self : List[str] ): return Matrix( [ [self.get_minor(__A , __A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowerCamelCase ( self : Union[str, Any] ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self : Optional[Any] ): return str(self.rows ) def __str__( self : Union[str, Any] ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(__A ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def _lowerCamelCase ( self : List[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in row: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(__A ) else: __UpperCamelCase = self.rows[0:position] + [row] + self.rows[position:] def _lowerCamelCase ( self : Optional[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in column: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: __UpperCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __UpperCamelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : Tuple , __A : object ): if not isinstance(__A , __A ): return NotImplemented return self.rows == other.rows def __ne__( self : Any , __A : object ): return not self == other def __neg__( self : List[Any] ): return self * -1 def __add__( self : List[str] , __A : Matrix ): if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : str , __A : Matrix ): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self : str , __A : Matrix | int | float ): if isinstance(__A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(__A , __A ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(__A , __A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self : Union[str, Any] , __A : int ): if not isinstance(__A , __A ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) __UpperCamelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowerCamelCase ( cls : Tuple , __A : list[int] , __A : list[int] ): return sum(row[i] * column[i] for i in range(len(__A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
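# Editor's sketch: a usage example for the matrix class above. The class's
# internal references name it `Matrix` with methods determinant(), inverse()
# and identity(), so those intended names are assumed here.
m = Matrix([[2, 1], [1, 1]])
assert m.determinant() == 1
assert m.inverse().rows == [[1, -1], [-1, 2]]  # adjugate / determinant
assert (m * m.inverse()).rows == m.identity().rows  # M * M^-1 == I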
import d4rl  # noqa  (registers the hopper-medium-v2 offline RL environments with gym)
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model to reduce its size on disk."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
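# Editor's sketch: a minimal end-to-end demonstration of remove_dup_initializers
# above. It builds a toy graph whose two Add nodes read from byte-identical
# initializers, saves it, and checks that the pass collapses them to one.
# File and tensor names here are illustrative only.
from onnx import TensorProto, helper

x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [2])

# Two initializers with different names but identical contents.
init_a = helper.make_tensor("a", TensorProto.FLOAT, [2], [1.0, 1.0])
init_b = helper.make_tensor("b", TensorProto.FLOAT, [2], [1.0, 1.0])

add_1 = helper.make_node("Add", ["x", "a"], ["t"])
add_2 = helper.make_node("Add", ["t", "b"], ["y"])
graph = helper.make_graph([add_1, add_2], "dup_demo", [x], [y], initializer=[init_a, init_b])

onnx.save(helper.make_model(graph), "dup_demo.onnx")
optimized_path = remove_dup_initializers("dup_demo.onnx")  # writes optimized_dup_demo.onnx
assert len(onnx.load(optimized_path).graph.initializer) == 1  # "b" was folded into "a"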
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class lowercase_ (unittest.TestCase ): """simple docstring""" def __init__( self : Dict ,lowercase__ : Any ,lowercase__ : int=7 ,lowercase__ : Dict=3 ,lowercase__ : Tuple=1_8 ,lowercase__ : List[Any]=3_0 ,lowercase__ : str=4_0_0 ,lowercase__ : List[str]=True ,lowercase__ : Union[str, Any]=None ,lowercase__ : Dict=True ,lowercase__ : Any=None ,lowercase__ : int=True ,lowercase__ : Any=[0.5, 0.5, 0.5] ,lowercase__ : int=[0.5, 0.5, 0.5] ,lowercase__ : List[str]=False ,): __lowercase = size if size is not None else {'''height''': 2_0, '''width''': 2_0} __lowercase = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} __lowercase = parent __lowercase = batch_size __lowercase = num_channels __lowercase = image_size __lowercase = min_resolution __lowercase = max_resolution __lowercase = do_resize __lowercase = size __lowercase = do_center_crop __lowercase = crop_size __lowercase = do_normalize __lowercase = image_mean __lowercase = image_std __lowercase = do_reduce_labels def SCREAMING_SNAKE_CASE ( self : Tuple ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def _A ( ): """simple docstring""" __lowercase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __lowercase = Image.open(dataset[0]['''file'''] ) __lowercase = Image.open(dataset[1]['''file'''] ) return image, map def _A ( ): """simple docstring""" __lowercase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) __lowercase = Image.open(ds[0]['''file'''] ) __lowercase = Image.open(ds[1]['''file'''] ) __lowercase = Image.open(ds[2]['''file'''] ) __lowercase = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : int = BeitImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = BeitImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE ( self : int ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase__ ,'''do_resize''' ) ) self.assertTrue(hasattr(lowercase__ ,'''size''' ) ) self.assertTrue(hasattr(lowercase__ ,'''do_center_crop''' ) ) self.assertTrue(hasattr(lowercase__ ,'''center_crop''' ) ) self.assertTrue(hasattr(lowercase__ ,'''do_normalize''' ) ) self.assertTrue(hasattr(lowercase__ ,'''image_mean''' ) ) self.assertTrue(hasattr(lowercase__ ,'''image_std''' ) ) def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''height''': 2_0, '''width''': 2_0} ) self.assertEqual(image_processor.crop_size 
,{'''height''': 1_8, '''width''': 1_8} ) self.assertEqual(image_processor.do_reduce_labels ,lowercase__ ) __lowercase = self.image_processing_class.from_dict( self.image_processor_dict ,size=4_2 ,crop_size=8_4 ,reduce_labels=lowercase__ ) self.assertEqual(image_processor.size ,{'''height''': 4_2, '''width''': 4_2} ) self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} ) self.assertEqual(image_processor.do_reduce_labels ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): pass def SCREAMING_SNAKE_CASE ( self : Tuple ): # Initialize image_processing __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ ,Image.Image ) # Test not batched input __lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched __lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): # Initialize image_processing __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,numpify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ ,np.ndarray ) # Test not batched input __lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched __lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): # Initialize image_processing __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ ) for image in image_inputs: self.assertIsInstance(lowercase__ ,torch.Tensor ) # Test not batched input __lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched __lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def 
SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # Initialize image_processing __lowercase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ ) __lowercase = [] for image in image_inputs: self.assertIsInstance(lowercase__ ,torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input __lowercase = image_processing(image_inputs[0] ,maps[0] ,return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual( encoding['''labels'''].shape ,( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual(encoding['''labels'''].dtype ,torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) # Test batched __lowercase = image_processing(lowercase__ ,lowercase__ ,return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual( encoding['''labels'''].shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual(encoding['''labels'''].dtype ,torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) # Test not batched input (PIL images) __lowercase , __lowercase = prepare_semantic_single_inputs() __lowercase = image_processing(lowercase__ ,lowercase__ ,return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual( encoding['''labels'''].shape ,( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual(encoding['''labels'''].dtype ,torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) # Test batched input (PIL images) __lowercase , __lowercase = prepare_semantic_batch_inputs() __lowercase = image_processing(lowercase__ ,lowercase__ ,return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape ,( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual( encoding['''labels'''].shape ,( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) self.assertEqual(encoding['''labels'''].dtype ,torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 ) def SCREAMING_SNAKE_CASE ( self : Any ): # Initialize image_processing __lowercase = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 
__lowercase , __lowercase = prepare_semantic_single_inputs() __lowercase = image_processing(lowercase__ ,lowercase__ ,return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_5_0 ) __lowercase = True __lowercase = image_processing(lowercase__ ,lowercase__ ,return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition `data` into values less than, equal to and greater than `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at position `index` if `items` were sorted."""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    less, equal, greater = _partition(items, pivot)
    count = len(equal)
    m = len(less)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less, index)
    # must be in larger
    else:
        return quick_select(greater, index - (m + count))
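# Editor's sketch: usage of quick_select above. `index` is a rank in sorted
# order, so the median of an odd-length list sits at index len(items) // 2;
# expected running time is O(n) on average thanks to the random pivot.
items = [2, 4, 5, 7, 899, 54, 32]  # sorted: [2, 4, 5, 7, 32, 54, 899]
assert quick_select(items, 0) == 2  # smallest
assert quick_select(items, len(items) // 2) == 7  # median
assert quick_select(items, len(items) - 1) == 899  # largest
assert quick_select(items, len(items)) is None  # out-of-range rank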
"""simple docstring""" import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( a__ ): lowerCamelCase : Optional[Any] =(DDIMParallelScheduler,) lowerCamelCase : Optional[int] =(("""eta""", 0.0), ("""num_inference_steps""", 50)) def __a ( self , **lowerCAmelCase__ ) -> Optional[int]: a : Any = { "num_train_timesteps": 1000, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**lowerCAmelCase__ ) return config def __a ( self , **lowerCAmelCase__ ) -> List[Any]: a : List[Any] = self.scheduler_classes[0] a : Any = self.get_scheduler_config(**lowerCAmelCase__ ) a : Optional[int] = scheduler_class(**lowerCAmelCase__ ) a, a : Dict = 10, 0.0 a : Optional[int] = self.dummy_model() a : Dict = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__ ) for t in scheduler.timesteps: a : List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ ) a : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample return sample def __a ( self ) -> List[str]: for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCAmelCase__ ) a : Dict = self.scheduler_classes[0] a : str = self.get_scheduler_config(steps_offset=1 ) a : Any = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def __a ( self ) -> Optional[Any]: for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def __a ( self ) -> int: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def __a ( self ) -> Any: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def __a ( self ) -> Any: for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowerCAmelCase__ ) def __a ( self ) -> str: for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase__ ) def __a ( self ) -> str: self.check_over_configs(thresholding=lowerCAmelCase__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , ) def __a ( self ) -> List[str]: for t in [1, 10, 49]: self.check_over_forward(time_step=lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ ) def __a ( self ) -> List[Any]: for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=lowerCAmelCase__ , eta=lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a : List[str] = self.scheduler_classes[0] a : Tuple = self.get_scheduler_config() a : Tuple = scheduler_class(**lowerCAmelCase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5 
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5 def __a ( self ) -> Tuple: a : Optional[Any] = self.scheduler_classes[0] a : Tuple = self.get_scheduler_config() a : str = scheduler_class(**lowerCAmelCase__ ) a, a : str = 10, 0.0 scheduler.set_timesteps(lowerCAmelCase__ ) a : Tuple = self.dummy_model() a : str = self.dummy_sample_deter a : Dict = self.dummy_sample_deter + 0.1 a : Tuple = self.dummy_sample_deter - 0.1 a : int = samplea.shape[0] a : str = torch.stack([samplea, samplea, samplea] , dim=0 ) a : Any = torch.arange(lowerCAmelCase__ )[0:3, None].repeat(1 , lowerCAmelCase__ ) a : Any = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) a : List[Any] = scheduler.batch_step_no_noise(lowerCAmelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCAmelCase__ ) a : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) a : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2 assert abs(result_mean.item() - 0.4_982 ) < 1E-3 def __a ( self ) -> Optional[Any]: a : List[str] = self.full_loop() a : Any = torch.sum(torch.abs(lowerCAmelCase__ ) ) a : str = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 172.0_067 ) < 1E-2 assert abs(result_mean.item() - 0.223_967 ) < 1E-3 def __a ( self ) -> Dict: a : Dict = self.full_loop(prediction_type="v_prediction" ) a : Optional[int] = torch.sum(torch.abs(lowerCAmelCase__ ) ) a : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 52.5_302 ) < 1E-2 assert abs(result_mean.item() - 0.0_684 ) < 1E-3 def __a ( self ) -> Any: # We specify different beta, so that the first alpha is 0.99 a : Dict = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 ) a : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) a : List[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 149.8_295 ) < 1E-2 assert abs(result_mean.item() - 0.1_951 ) < 1E-3 def __a ( self ) -> Dict: # We specify different beta, so that the first alpha is 0.99 a : Optional[int] = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 ) a : List[str] = torch.sum(torch.abs(lowerCAmelCase__ ) ) a : Any = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 149.0_784 ) < 1E-2 assert abs(result_mean.item() - 0.1_941 ) < 1E-3
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Tie the linear layer to the embedding weights (shared storage, no copy).
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
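# Editor's sketch: a sanity check for make_linear_from_emb above. The returned
# nn.Linear shares its weight storage with the embedding, which is how the
# converted model ties its LM head to the shared token embeddings. Sizes are
# arbitrary demo values.
demo_emb = nn.Embedding(10, 4)  # vocab_size=10, hidden_size=4
demo_head = make_linear_from_emb(demo_emb)
assert demo_head.weight.data_ptr() == demo_emb.weight.data_ptr()  # same storage, no copy
assert demo_head.bias is None
assert demo_head(torch.randn(4)).shape == (10,)  # logits over the vocabulary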
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : int = [0 for i in range(len(A_ ) )] # initialize interval's left pointer and right pointer lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = 0, 0 for i in range(1 , len(A_ ) ): # case when current index is inside the interval if i <= right_pointer: lowerCAmelCase__ : Optional[int] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) lowerCAmelCase__ : Tuple = min_edge while go_next(A_ , A_ , A_ ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = i, i + z_result[i] - 1 return z_result def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): return i + z_result[i] < len(A_ ) and s[z_result[i]] == s[i + z_result[i]] def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Dict = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string lowerCAmelCase__ : List[str] = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(A_ ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
import logging

import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed


class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # Fill a character buffer large enough to yield the target number of sequences.
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            # Chop the concatenated token stream into fixed-length examples.
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
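# Editor's note: evaluate() reports perplexity = exp(mean token cross-entropy),
# the standard language-modeling metric. A quick illustration with a made-up
# loss value: a mean loss of 2.0 nats corresponds to perplexity e^2 ~= 7.39,
# i.e. the model is as uncertain as a uniform choice over about 7.4 tokens.
import math

assert abs(math.exp(2.0) - 7.389) < 1e-3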
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : Optional[Any] = { 'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'], 'tokenization_roberta': ['RobertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = ['RobertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ 'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'RobertaForCausalLM', 'RobertaForMaskedLM', 'RobertaForMultipleChoice', 'RobertaForQuestionAnswering', 'RobertaForSequenceClassification', 'RobertaForTokenClassification', 'RobertaModel', 'RobertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = [ 'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRobertaForCausalLM', 'TFRobertaForMaskedLM', 'TFRobertaForMultipleChoice', 'TFRobertaForQuestionAnswering', 'TFRobertaForSequenceClassification', 'TFRobertaForTokenClassification', 'TFRobertaMainLayer', 'TFRobertaModel', 'TFRobertaPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = [ 'FlaxRobertaForCausalLM', 'FlaxRobertaForMaskedLM', 'FlaxRobertaForMultipleChoice', 'FlaxRobertaForQuestionAnswering', 'FlaxRobertaForSequenceClassification', 'FlaxRobertaForTokenClassification', 'FlaxRobertaModel', 'FlaxRobertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys __lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a__ : Any =logging.get_logger(__name__) a__ : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo" SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"] SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ): __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = hidden_size __UpperCamelCase = num_layers __UpperCamelCase = num_heads __UpperCamelCase = intermediate_size __UpperCamelCase = window_size __UpperCamelCase = activation_function __UpperCamelCase = resid_dropout __UpperCamelCase = embed_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = classifier_dropout __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id __UpperCamelCase = attention_types __UpperCamelCase = self.expand_attention_types_params(__A ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=__A , eos_token_id=__A , **__A ) @staticmethod def _lowerCamelCase ( __A : Tuple ): __UpperCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any: """simple docstring""" import torch __UpperCamelCase = input.size() __UpperCamelCase = len(__lowercase ) __UpperCamelCase = shape[dimension] __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase ) __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1 __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None] __UpperCamelCase = [slice(__lowercase )] * rank __UpperCamelCase = indices __UpperCamelCase = input[s] __UpperCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" import torch __UpperCamelCase = torch.arange(1 , __lowercase ) __UpperCamelCase = torch.remainder(__lowercase , __lowercase ) __UpperCamelCase = remainders == 0 __UpperCamelCase = candidates[divisor_indices] __UpperCamelCase = torch.max(__lowercase ) return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' ) class snake_case ( __lowerCamelCase ): """simple docstring""" @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def _lowerCamelCase ( self : int ): return self._config.num_heads def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ): __UpperCamelCase = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs['attention_mask'] if self.use_past: __UpperCamelCase = ordered_inputs['attention_mask'].dtype __UpperCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def _lowerCamelCase ( self : Dict ): return 1_3
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''', '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''', '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Optional[int] ="longformer" def __init__( self , snake_case__ = 512 , snake_case__ = 2 , snake_case__ = 1 , snake_case__ = 0 , snake_case__ = 2 , snake_case__ = 30_522 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = 3_072 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 512 , snake_case__ = 2 , snake_case__ = 0.02 , snake_case__ = 1e-12 , snake_case__ = False , **snake_case__ , ): """simple docstring""" super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowerCAmelCase : Union[str, Any] = attention_window lowerCAmelCase : List[Any] = sep_token_id lowerCAmelCase : Dict = bos_token_id lowerCAmelCase : int = eos_token_id lowerCAmelCase : Dict = vocab_size lowerCAmelCase : Dict = hidden_size lowerCAmelCase : Optional[Any] = num_hidden_layers lowerCAmelCase : Tuple = num_attention_heads lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : int = attention_probs_dropout_prob lowerCAmelCase : Tuple = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Any = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : List[str] = onnx_export class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" def __init__( self , snake_case__ , snake_case__ = "default" , snake_case__ = None ): """simple docstring""" super().__init__(snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase : Any = True @property def lowercase__ ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCAmelCase : Tuple = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCAmelCase : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("global_attention_mask", dynamic_axis), ] ) @property def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = super().outputs if self.task == "default": lowerCAmelCase : Tuple = {0: "batch"} return outputs @property def lowercase__ ( self ): """simple docstring""" return 1e-4 @property def lowercase__ ( self ): """simple docstring""" return max(super().default_onnx_opset , 14 ) def lowercase__ ( self , snake_case__ , 
snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , ): """simple docstring""" lowerCAmelCase : Dict = super().generate_dummy_inputs( preprocessor=snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly lowerCAmelCase : Dict = torch.zeros_like(inputs["input_ids"] ) # make every second token global lowerCAmelCase : List[Any] = 1 return inputs
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa" SCREAMING_SNAKE_CASE_ : Dict =( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) SCREAMING_SNAKE_CASE_ : List[str] ="document_qa" SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"] SCREAMING_SNAKE_CASE_ : Any =["text"] def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ): if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*__A , **__A ) def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ): __UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' __UpperCamelCase = task_prompt.replace('{user_input}' , __A ) __UpperCamelCase = self.pre_processor.tokenizer( __A , add_special_tokens=__A , return_tensors='pt' ).input_ids __UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ): return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences def _lowerCamelCase ( self : Tuple , __A : List[Any] ): __UpperCamelCase = self.pre_processor.batch_decode(__A )[0] __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) __UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip() # remove first task start token __UpperCamelCase = self.pre_processor.tokenajson(__A ) return sequence["answer"]
53
0
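For context, a hedged sketch of the Donut DocVQA flow the tool above wraps; it assumes the naver-clova-ix checkpoint downloads succeed, and "invoice.png" and the question are hypothetical stand-ins.

from PIL import Image
from transformers import AutoProcessor, VisionEncoderDecoderModel

processor = AutoProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")

image = Image.open("invoice.png").convert("RGB")  # hypothetical document image
prompt = "<s_docvqa><s_question>What is the total?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
pixel_values = processor(image, return_tensors="pt").pixel_values
outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
print(processor.token2json(processor.batch_decode(outputs)[0]))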
"""simple docstring""" from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : List[Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]} return Dataset.from_dict(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Any = self._create_example_records() UpperCAmelCase : int = Dataset.from_list(_SCREAMING_SNAKE_CASE ) self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] ) for i, r in enumerate(_SCREAMING_SNAKE_CASE ): self.assertDictEqual(_SCREAMING_SNAKE_CASE , example_records[i] ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : List[str] = self._create_example_records() UpperCAmelCase : List[str] = Dataset.from_list(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: # checks what happens with missing columns '''simple docstring''' UpperCAmelCase : Optional[int] = [{"""col_1""": 1}, {"""col_2""": """x"""}] UpperCAmelCase : List[str] = Dataset.from_list(_SCREAMING_SNAKE_CASE ) self.assertDictEqual(dset[0] , {"""col_1""": 1} ) self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns def SCREAMING_SNAKE_CASE ( self ) -> Tuple: # checks if the type can be inferred from the second record '''simple docstring''' UpperCAmelCase : Union[str, Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}] UpperCAmelCase : List[Any] = Dataset.from_list(_SCREAMING_SNAKE_CASE ) self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Dict = Dataset.from_list([] ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 0 ) self.assertListEqual(dset.column_names , [] )
109
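A short usage sketch of the Dataset.from_list behaviour these tests cover.

from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
dset = Dataset.from_list(records)
print(dset.column_names)  # ['col_1', 'col_2']
print(dset[0])            # {'col_1': 3, 'col_2': 'a'}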
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
53
0
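A hedged sketch of loading one of the pipelines exported above; it assumes a CUDA device and that the commonly used shi-labs/versatile-diffusion checkpoint downloads succeed.

import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")
image = pipe("an astronaut riding a horse on mars").images[0]
image.save("astronaut.png")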
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'} lowerCAmelCase = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } lowerCAmelCase = { 'camembert-base': 512, } lowerCAmelCase = '▁' class _a ( UpperCamelCase__ ): _lowercase : List[Any] = VOCAB_FILES_NAMES _lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Optional[Any] = ['''input_ids''', '''attention_mask'''] def __init__( self: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: str="<s>" , UpperCamelCase_: str="</s>" , UpperCamelCase_: Any="</s>" , UpperCamelCase_: Dict="<s>" , UpperCamelCase_: Dict="<unk>" , UpperCamelCase_: Union[str, Any]="<pad>" , UpperCamelCase_: int="<mask>" , UpperCamelCase_: Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: List[Any] , ) -> None: """simple docstring""" lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) lowercase__ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> lowercase__ = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3} lowercase__ = len(self.fairseq_tokens_to_ids ) lowercase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def lowerCamelCase_ ( self: List[str] ) -> int: """simple docstring""" lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Any] ) -> Optional[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(UpperCamelCase_ ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[str] ) -> Dict: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[int] ) -> List[Any]: """simple docstring""" lowercase__ = [] lowercase__ = '''''' lowercase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase_ ) + token lowercase__ = True lowercase__ = [] else: current_sub_tokens.append(UpperCamelCase_ ) lowercase__ = False out_string += self.sp_model.decode(UpperCamelCase_ ) return out_string.strip() def __getstate__( self: List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.__dict__.copy() lowercase__ = None return state def __setstate__( self: Any , UpperCamelCase_: List[str] ) -> Dict: """simple docstring""" lowercase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self: str , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
110
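A minimal sketch of the sentence-pair layout built by the tokenizer above (<s> A </s></s> B </s>, with all-zero token type ids); it assumes sentencepiece is installed and the camembert-base files download.

from transformers import CamembertTokenizer

tok = CamembertTokenizer.from_pretrained("camembert-base")
ids_a = tok.encode("Bonjour", add_special_tokens=False)
ids_b = tok.encode("le monde", add_special_tokens=False)
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
print(tok.convert_ids_to_tokens(pair))  # ['<s>', ..., '</s>', '</s>', ..., '</s>']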
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase__ ( __lowercase : Features ) -> Optional[int]: """simple docstring""" __UpperCamelCase = np.inf def set_batch_size(__lowercase : FeatureType ) -> None: nonlocal batch_size if isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary": __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__lowercase , __lowercase ) return None if batch_size is np.inf else batch_size class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ): super().__init__( __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , ) __UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths} __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] __UpperCamelCase = Parquet( cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , ) def _lowerCamelCase ( self : Optional[int] ): # Build iterable dataset if self.streaming: __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , ) __UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__A , in_memory=self.keep_in_memory ) return dataset class snake_case : """simple docstring""" def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ): __UpperCamelCase = dataset __UpperCamelCase = path_or_buf __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features ) __UpperCamelCase = parquet_writer_kwargs def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: __UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs ) else: __UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs ) return written def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ): __UpperCamelCase = 0 __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A ) __UpperCamelCase = self.dataset.features.arrow_schema 
__UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): __UpperCamelCase = query_table( table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__A ) written += batch.nbytes writer.close() return written
53
0
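A short sketch of the Parquet round trip that the reader and writer classes above implement; file name is illustrative.

from datasets import Dataset

dset = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
dset.to_parquet("data.parquet")                  # writer path
reloaded = Dataset.from_parquet("data.parquet")  # reader path
print(reloaded.num_rows)  # 2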
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__( __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : Tuple = GPTSanJapaneseTokenizer UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Optional[Any] = {"do_clean_text": False, "add_prefix_space": False} def lowerCAmelCase__ ( self: str ): super().setUp() # fmt: off __lowerCamelCase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on __lowerCamelCase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀 __lowerCamelCase = {"""unk_token""": """<unk>"""} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.emoji_file , """w""" ) as emoji_writer: emoji_writer.write(json.dumps(__A ) ) def lowerCAmelCase__ ( self: Union[str, Any] , **UpperCamelCase_: List[Any] ): kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A ) def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ): __lowerCamelCase = """こんにちは、世界。 \nこんばんは、㔺界。😀""" __lowerCamelCase = """こんにちは、世界。 \nこんばんは、世界。😀""" return input_text, output_text def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Any ): __lowerCamelCase, __lowerCamelCase = self.get_input_output_texts(__A ) __lowerCamelCase = tokenizer.encode(__A , add_special_tokens=__A ) __lowerCamelCase = tokenizer.decode(__A , clean_up_tokenization_spaces=__A ) return text, ids def lowerCAmelCase__ ( self: int ): pass # TODO add if relevant def lowerCAmelCase__ ( self: Union[str, Any] ): pass # TODO add if relevant def lowerCAmelCase__ ( self: Optional[int] ): pass # TODO add if relevant def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase = self.get_tokenizer() # Testing tokenization __lowerCamelCase = """こんにちは、世界。 こんばんは、㔺界。""" __lowerCamelCase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""] __lowerCamelCase = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids without special tokens __lowerCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] __lowerCamelCase = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) # Testing conversion to ids with special tokens __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] __lowerCamelCase = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A , __A ) def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = self.get_tokenizer() # Testing tokenization __lowerCamelCase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。""" __lowerCamelCase = """こんにちは、、、、世界。こんばんは、、、、世界。""" __lowerCamelCase = tokenizer.encode(__A ) __lowerCamelCase = tokenizer.decode(__A ) 
self.assertEqual(__A , __A ) @slow def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization __lowerCamelCase = """こんにちは、世界。""" __lowerCamelCase = """こんばんは、㔺界。😀""" __lowerCamelCase = """こんにちは、世界。こんばんは、世界。😀""" __lowerCamelCase = tokenizer.encode(prefix_text + input_text ) __lowerCamelCase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text ) __lowerCamelCase = tokenizer.encode(__A , prefix_text=__A ) __lowerCamelCase = tokenizer.decode(__A ) __lowerCamelCase = tokenizer.decode(__A ) __lowerCamelCase = tokenizer.decode(__A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) self.assertEqual(__A , __A ) @slow def lowerCAmelCase__ ( self: Optional[Any] ): __lowerCamelCase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) # Testing tokenization __lowerCamelCase = """こんにちは、世界。""" __lowerCamelCase = """こんばんは、㔺界。😀""" __lowerCamelCase = len(tokenizer.encode(__A ) ) - 2 __lowerCamelCase = len(tokenizer.encode(__A ) ) - 2 __lowerCamelCase = [1] + [0] * (len_prefix + len_text + 1) __lowerCamelCase = [1] * (len_prefix + len_text + 1) + [0] __lowerCamelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1) __lowerCamelCase = tokenizer(prefix_text + input_text ).token_type_ids __lowerCamelCase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids __lowerCamelCase = tokenizer(__A , prefix_text=__A ).token_type_ids self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) self.assertListEqual(__A , __A ) @slow def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) __lowerCamelCase = tokenizer.encode("""あンいワ""" ) __lowerCamelCase = tokenizer.encode("""""" , prefix_text="""あンいワ""" ) __lowerCamelCase = tokenizer.encode("""いワ""" , prefix_text="""あン""" ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) ) self.assertNotEqual(__A , __A ) self.assertNotEqual(__A , __A ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" ) __lowerCamelCase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]] __lowerCamelCase = tokenizer(__A , padding=__A ) __lowerCamelCase = tokenizer.batch_encode_plus(__A , padding=__A ) # fmt: off __lowerCamelCase = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] __lowerCamelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] __lowerCamelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , __A ) self.assertListEqual(x_token.token_type_ids , __A ) self.assertListEqual(x_token.attention_mask , __A ) self.assertListEqual(x_token_a.input_ids , __A ) self.assertListEqual(x_token_a.token_type_ids , __A ) self.assertListEqual(x_token_a.attention_mask , __A ) def lowerCAmelCase__ ( self: str ): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def lowerCAmelCase__ ( self: str ): # tokenizer has no padding token pass
12
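A hedged sketch of the prefix_text behaviour the tests above exercise; it assumes the Tanrei/GPTSAN-japanese tokenizer downloads succeed.

from transformers import GPTSanJapaneseTokenizer

tok = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
enc = tok("こんばんは、世界。", prefix_text="こんにちは、世界。")
print(enc.token_type_ids)  # 1s over the prefix segment, 0s over the input segment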
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
53
0
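A minimal sketch of the YAML round trip tested above; note _to_yaml_list/_from_yaml_list are private helpers of datasets and may change.

from datasets.splits import SplitDict, SplitInfo

sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
yaml_list = sd._to_yaml_list()
reloaded = SplitDict._from_yaml_list(yaml_list)
assert reloaded["train"].num_examples == 42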
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __UpperCamelCase : Optional[Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowercase__ ( __lowerCamelCase): UpperCamelCase_ = ["pixel_values"] def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : str , ): '''simple docstring''' super().__init__(**__A ) SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 224} SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(__A , default_to_square=__A ) SCREAMING_SNAKE_CASE : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} SCREAMING_SNAKE_CASE : int = get_size_dict(__A , default_to_square=__A , param_name='''crop_size''' ) SCREAMING_SNAKE_CASE : List[str] = do_resize SCREAMING_SNAKE_CASE : Optional[Any] = size SCREAMING_SNAKE_CASE : int = resample SCREAMING_SNAKE_CASE : Tuple = do_center_crop SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size SCREAMING_SNAKE_CASE : Any = do_rescale SCREAMING_SNAKE_CASE : int = rescale_factor SCREAMING_SNAKE_CASE : List[str] = do_normalize SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN SCREAMING_SNAKE_CASE : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD SCREAMING_SNAKE_CASE : Tuple = do_convert_rgb def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = get_size_dict(__A , default_to_square=__A ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(__A , size=size['''shortest_edge'''] , default_to_square=__A ) return resize(__A , size=__A , resample=__A , data_format=__A , **__A ) def __A ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[int] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__A , size=(size['''height'''], size['''width''']) , data_format=__A , **__A ) def __A ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ): '''simple docstring''' return rescale(__A , scale=__A , data_format=__A , **__A ) def __A ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ): '''simple docstring''' return normalize(__A , mean=__A , std=__A , data_format=__A , **__A ) def __A ( self : Union[str, Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE : Dict = size if size is not None else self.size SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__A , param_name='''size''' , default_to_square=__A ) SCREAMING_SNAKE_CASE : int = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(__A , param_name='''crop_size''' , default_to_square=__A ) SCREAMING_SNAKE_CASE : List[str] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb SCREAMING_SNAKE_CASE : int = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: SCREAMING_SNAKE_CASE : Any = [convert_to_rgb(__A ) for image in images] # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(__A ) for image in images] if do_resize: SCREAMING_SNAKE_CASE : List[str] = [self.resize(image=__A , size=__A , resample=__A ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE : str = [self.center_crop(image=__A , size=__A ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE : str = [self.rescale(image=__A , scale=__A ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images] SCREAMING_SNAKE_CASE : Tuple = [to_channel_dimension_format(__A , __A ) for image in images] SCREAMING_SNAKE_CASE : Union[str, Any] = {'''pixel_values''': images} return BatchFeature(data=__A , tensor_type=__A )
182
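A short sketch of running the preprocessing pipeline above on a single image with its default settings.

import numpy as np
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: resize to shortest_edge=224, then 224x224 center crop
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)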
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
53
0
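A short sketch of the effect of the _LazyModule indirection above: importing a config is cheap, and the torch-only modeling classes stay unloaded until first accessed.

from transformers import BigBirdPegasusConfig

config = BigBirdPegasusConfig()
print(config.model_type)  # 'bigbird_pegasus'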
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
62
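A minimal sketch of instantiating the configuration above with its defaults; values echo the signature.

from transformers import MgpstrConfig

config = MgpstrConfig()
print(config.max_token_length, config.num_character_labels)  # 27 38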
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a__ : str =logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str =["input_features", "attention_mask"] def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ): super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A ) __UpperCamelCase = num_mel_bins __UpperCamelCase = do_ceptral_normalize __UpperCamelCase = normalize_means __UpperCamelCase = normalize_vars __UpperCamelCase = True def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ): __UpperCamelCase = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers __UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 ) __UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: __UpperCamelCase = x[:input_length].mean(axis=0 ) __UpperCamelCase = np.subtract(__A , __A ) if normalize_vars: __UpperCamelCase = x[:input_length].std(axis=0 ) __UpperCamelCase = np.divide(__A , __A ) if input_length < x.shape[0]: __UpperCamelCase = padding_value # make sure array is in float32 __UpperCamelCase = x.astype(np.floataa ) return x def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ): __UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__A , __A ) ] def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __UpperCamelCase = is_batched_numpy or ( isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__A , np.ndarray ): __UpperCamelCase = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCamelCase = [raw_speech] # extract fbank features __UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech] # convert into correct format for padding __UpperCamelCase = BatchFeature({'input_features': features} ) __UpperCamelCase = self.pad( __A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , ) # make sure list is in array format __UpperCamelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , __A ): __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features] __UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: __UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __UpperCamelCase = ( np.array(__A , dtype=np.intaa ) if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD else None ) __UpperCamelCase = self.normalize( padded_inputs['input_features'] , attention_mask=__A ) if return_tensors is not None: __UpperCamelCase = padded_inputs.convert_to_tensors(__A ) return padded_inputs
53
0
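A toy sketch of the utterance-level CMVN (cepstral mean and variance normalization) the feature extractor above applies per example; shapes are illustrative.

import numpy as np

features = np.random.randn(100, 80).astype(np.float32)  # (frames, num_mel_bins)
normalized = (features - features.mean(axis=0)) / features.std(axis=0)
print(normalized.mean(axis=0)[:3].round(6))  # ~[0. 0. 0.]
print(normalized.std(axis=0)[:3].round(6))   # ~[1. 1. 1.]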
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar) and the speed of light C
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
63
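A brief usage sketch of the function above (with casimir_force in scope); exactly one argument is zero and is solved for, and the numeric inputs are illustrative.

print(casimir_force(force=0, area=4.0, distance=0.03))          # solves for force
print(casimir_force(force=2.635e-10, area=0.0023, distance=0))  # solves for distance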
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : List[Any] =logging.get_logger(__name__) a__ : List[Any] ={ '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model" def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = project_dim class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model" def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ): super().__init__(**__A ) __UpperCamelCase = hidden_size __UpperCamelCase = intermediate_size __UpperCamelCase = projection_dim __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = num_channels __UpperCamelCase = patch_size __UpperCamelCase = image_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = attention_dropout __UpperCamelCase = layer_norm_eps __UpperCamelCase = hidden_act @classmethod def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type' ) == "altclip": __UpperCamelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] ="altclip" SCREAMING_SNAKE_CASE_ : Optional[int] =True def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). __UpperCamelCase = kwargs.pop('text_config_dict' , __A ) __UpperCamelCase = kwargs.pop('vision_config_dict' , __A ) super().__init__(**__A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __UpperCamelCase = {} # This is the complete result when using `text_config_dict`. __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __UpperCamelCase = {} # This is the complete result when using `vision_config_dict`. __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __UpperCamelCase = { str(__A ): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __UpperCamelCase = {} logger.info('`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values.' ) if vision_config is None: __UpperCamelCase = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' ) __UpperCamelCase = AltCLIPTextConfig(**__A ) __UpperCamelCase = AltCLIPVisionConfig(**__A ) __UpperCamelCase = projection_dim __UpperCamelCase = logit_scale_init_value __UpperCamelCase = 1.0 @classmethod def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = copy.deepcopy(self.__dict__ ) __UpperCamelCase = self.text_config.to_dict() __UpperCamelCase = self.vision_config.to_dict() __UpperCamelCase = self.__class__.model_type return output
53
0
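A minimal sketch of composing the full AltCLIP configuration from its two sub-configs via the classmethod defined above.

from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
print(config.projection_dim, config.logit_scale_init_value)  # 768 2.6592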
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCamelCase__ = get_tests_dir('fixtures') lowerCamelCase__ = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowerCamelCase__ = get_tests_dir('fixtures/dummy-config.json') class lowerCAmelCase__ ( unittest.TestCase ): def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]: '''simple docstring''' _UpperCAmelCase : List[str] = 0 def lowerCAmelCase__ ( self : Dict ) ->int: '''simple docstring''' _UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__A , __A ) def lowerCAmelCase__ ( self : str ) ->Optional[int]: '''simple docstring''' _UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def lowerCAmelCase__ ( self : Dict ) ->Any: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase : Optional[int] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally _UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(__A ).to_dict() config_dict.pop("feature_extractor_type" ) _UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(**__A ) # save in new folder model_config.save_pretrained(__A ) config.save_pretrained(__A ) _UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(__A ) # make sure private variable is not incorrectly saved _UpperCAmelCase : Any = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__A , __A ) def lowerCAmelCase__ ( self : Dict ) ->Dict: '''simple docstring''' _UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def lowerCAmelCase__ ( self : Dict ) ->int: '''simple docstring''' with self.assertRaisesRegex( __A , "bert-base is not a local folder and is not a valid model identifier" ): _UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained("bert-base" ) def lowerCAmelCase__ ( self : List[Any] ) ->Any: '''simple docstring''' with self.assertRaisesRegex( __A , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _UpperCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained(__A , revision="aaaaaa" ) def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex( __A , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): _UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def lowerCAmelCase__ ( self : Tuple ) ->str: '''simple docstring''' with self.assertRaises(__A ): _UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(__A ): _UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__A ) _UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__A ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__A ) _UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(__A , trust_remote_code=__A ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]: '''simple docstring''' try: AutoConfig.register("custom" , __A ) AutoFeatureExtractor.register(__A , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoFeatureExtractor.register(__A , __A ) # Now that the config is registered, it can be used as any other config with the auto-API _UpperCAmelCase : Optional[Any] = CustomFeatureExtractor.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__A ) _UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase__ ( self : Any ) ->Optional[Any]: '''simple docstring''' class lowerCAmelCase__ ( __lowerCamelCase ): lowerCAmelCase : Dict = True try: AutoConfig.register("custom" , __A ) AutoFeatureExtractor.register(__A , __A ) # If remote code is not set, the default is to use local _UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. _UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__A ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub _UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__A ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) self.assertTrue(not hasattr(__A , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
234
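A short sketch of the basic AutoFeatureExtractor dispatch the first test above covers; it assumes the checkpoint files download.

from transformers import AutoFeatureExtractor

fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(fe).__name__)  # Wav2Vec2FeatureExtractor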
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Any ) -> Optional[Any]: """simple docstring""" with open(__lowercase ) as metadata_file: __UpperCamelCase = json.load(__lowercase ) __UpperCamelCase = LukeConfig(use_entity_aware_attention=__lowercase , **metadata['model_config'] ) # Load in the weights from the checkpoint_path __UpperCamelCase = torch.load(__lowercase , map_location='cpu' ) # Load the entity vocab file __UpperCamelCase = load_entity_vocab(__lowercase ) __UpperCamelCase = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks __UpperCamelCase = AddedToken('<ent>' , lstrip=__lowercase , rstrip=__lowercase ) __UpperCamelCase = AddedToken('<ent2>' , lstrip=__lowercase , rstrip=__lowercase ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__lowercase ) with open(os.path.join(__lowercase , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(__lowercase , __lowercase ) __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase ) # Initialize the embeddings of the special tokens __UpperCamelCase = state_dict['embeddings.word_embeddings.weight'] __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 ) __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 ) __UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __UpperCamelCase = F'''encoder.layer.{layer_index}.attention.self.''' __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight'] __UpperCamelCase = entity_emb[entity_vocab['[MASK]']] __UpperCamelCase = LukeModel(config=__lowercase ).eval() __UpperCamelCase , __UpperCamelCase = model.load_state_dict(__lowercase , strict=__lowercase ) if not (len(__lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'''Missing keys {', '.join(__lowercase )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )): raise ValueError( 'Unexpected keys' F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' ) # Check outputs __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase , task='entity_classification' ) __UpperCamelCase = ( 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the' ' new world number one avoid a humiliating second- round exit at Wimbledon .' 
) __UpperCamelCase = (39, 42) __UpperCamelCase = tokenizer(__lowercase , entity_spans=[span] , add_prefix_space=__lowercase , return_tensors='pt' ) __UpperCamelCase = model(**__lowercase ) # Verify word hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 42, 1024) ) __UpperCamelCase = torch.tensor( [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] ) else: # base __UpperCamelCase = torch.Size((1, 42, 768) ) __UpperCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 1, 1024) ) __UpperCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] ) else: # base __UpperCamelCase = torch.Size((1, 1, 768) ) __UpperCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(__lowercase ) ) model.save_pretrained(__lowercase ) def lowercase__ ( __lowercase : Dict ) -> List[str]: """simple docstring""" __UpperCamelCase = {} with open(__lowercase , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(__lowercase ): __UpperCamelCase , __UpperCamelCase = line.rstrip().split('\t' ) __UpperCamelCase = index return entity_vocab if __name__ == "__main__": a__ : Any =argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) a__ : str =parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
53
0
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) UpperCamelCase__ : Any = logging.getLogger() def UpperCAmelCase ( ) -> List[Any]: """simple docstring""" A_ : Dict = argparse.ArgumentParser() parser.add_argument("""-f""" ) A_ : Tuple = parser.parse_args() return args.f def UpperCAmelCase ( a_ ) -> Any: """simple docstring""" A_ : Optional[Any] = {} A_ : Optional[int] = os.path.join(__lowercase , """all_results.json""" ) if os.path.exists(__lowercase ): with open(__lowercase , """r""" ) as f: A_ : Optional[Any] = json.load(__lowercase ) else: raise ValueError(F"can\'t find {path}" ) return results def UpperCAmelCase ( ) -> Dict: """simple docstring""" A_ : Optional[Any] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() UpperCamelCase__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _lowerCAmelCase ( __lowerCamelCase ): """simple docstring""" @classmethod def UpperCAmelCase_ ( cls ) -> Any: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU A_ : Tuple = tempfile.mkdtemp() A_ : Dict = os.path.join(cls.tmpdir , """default_config.yml""" ) write_basic_config(save_location=cls.configPath ) A_ : Dict = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def UpperCAmelCase_ ( cls ) -> Optional[int]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : List[str] = self.get_auto_remove_tmp_dir() A_ : List[Any] = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) A_ : Dict = get_results(__A ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> Optional[Any]: A_ : Union[str, Any] = self.get_auto_remove_tmp_dir() A_ : Union[str, Any] = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) A_ : Tuple = get_results(__A ) self.assertLess(result["""perplexity"""] , 100 ) self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Optional[Any] = self.get_auto_remove_tmp_dir() A_ : Dict = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) A_ : Optional[int] = get_results(__A ) self.assertLess(result["""perplexity"""] , 42 ) self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> str: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu A_ : Dict = 7 if get_gpu_count() > 1 else 2 A_ : Tuple = self.get_auto_remove_tmp_dir() A_ : List[str] = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) A_ : Dict = get_results(__A ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 ) self.assertLess(result["""train_loss"""] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> Tuple: A_ : int = self.get_auto_remove_tmp_dir() A_ : int = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) A_ : Union[str, Any] = get_results(__A ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["""eval_f1"""] , 28 ) self.assertGreaterEqual(result["""eval_exact"""] , 28 ) self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: A_ : Tuple = self.get_auto_remove_tmp_dir() A_ : Optional[Any] = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) A_ : Optional[Any] = get_results(__A ) self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__A , """swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> List[Any]: A_ : List[str] = self.get_auto_remove_tmp_dir() A_ : Dict = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) A_ : Dict = get_results(__A ) self.assertGreaterEqual(result["""eval_rouge1"""] , 10 ) self.assertGreaterEqual(result["""eval_rouge2"""] , 2 ) self.assertGreaterEqual(result["""eval_rougeL"""] , 7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 ) self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> List[str]: A_ : List[str] = self.get_auto_remove_tmp_dir() A_ : Optional[int] = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split() run_command(self._launch_args + testargs ) A_ : Optional[int] = get_results(__A ) self.assertGreaterEqual(result["""eval_bleu"""] , 30 ) self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """translation_no_trainer""" ) ) ) @slow def UpperCAmelCase_ ( self ) -> str: A_ : Any = logging.StreamHandler(sys.stdout ) logger.addHandler(__A ) A_ : Optional[int] = self.get_auto_remove_tmp_dir() A_ : Optional[int] = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name 
huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split() run_command(self._launch_args + testargs ) A_ : str = get_results(__A ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 ) @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def UpperCAmelCase_ ( self ) -> Any: A_ : Optional[Any] = self.get_auto_remove_tmp_dir() A_ : Tuple = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) A_ : Tuple = get_results(__A ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__A , """step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(__A , """image_classification_no_trainer""" ) ) )
344
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case ( __lowerCamelCase ): """simple docstring""" def _lowerCamelCase ( self : Any ): __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = 8 # DPR tok __UpperCamelCase = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __UpperCamelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) ) __UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase = {'unk_token': '<unk>'} __UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__A ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__A ) ) def _lowerCamelCase ( self : Tuple ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Optional[int] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Union[str, Any] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def _lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , 
question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _lowerCamelCase ( self : Any , __A : bool ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , ) if from_disk: __UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' ) __UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' ) dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) ) dataset.drop_index('embeddings' ) dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) ) del dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , ) return retriever def _lowerCamelCase ( self : int ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' ) dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' ) pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) ) __UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' ) __UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset} pickle.dump(__A , open(__A , 'wb' ) ) __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , ) __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with 
patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = self.get_dummy_dataset() retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_legacy_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) 
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] ) self.assertEqual(len(doc_dicts[0]['text'] ) , __A ) self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Optional[Any] ): import torch __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , np.ndarray ) __UpperCamelCase = retriever( __A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841 out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], out['doc_ids'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer() __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) retriever.set_ctx_encoder_tokenizer(__A ) __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) self.assertEqual( len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
53
0
'''simple docstring''' lowercase__ = { "joule": 1.0, "kilojoule": 1000, "megajoule": 1000000, "gigajoule": 1000000000, "wattsecond": 1.0, "watthour": 3600, "kilowatthour": 3600000, "newtonmeter": 1.0, "calorie_nutr": 4186.8, "kilocalorie_nutr": 4186800.00, "electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9, "britishthermalunit_it": 1055.05585, "footpound": 1.355818, } def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: UpperCAmelCase : Any = ( F"""Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n""" F"""Valid values are: {", ".join(__lowercase )}""" ) raise ValueError(__lowercase ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
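A minimal worked check of the conversion rule above, value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type], with factors inlined straight from the table (the converter's obfuscated name is avoided on purpose):

# 1 kilowatthour -> joule:     1 * 3_600_000 / 1.0   == 3_600_000.0
# 1_000 joule    -> kilojoule: 1_000 * 1.0 / 1_000   == 1.0
# 1 calorie_nutr -> joule:     1 * 4_186.8 / 1.0     == 4_186.8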
151
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : List[Any] ={ '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] =[ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys a__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
53
0
from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCAmelCase = logging.get_logger(__name__) # TODO: upload to AWS __UpperCAmelCase = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class UpperCamelCase__ ( __lowerCamelCase ): """simple docstring""" UpperCAmelCase_ ="retribert" def __init__( self , _A=30522 , _A=768 , _A=8 , _A=12 , _A=3072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=2 , _A=0.02 , _A=1E-12 , _A=True , _A=128 , _A=0 , **_A , ) -> List[Any]: super().__init__(pad_token_id=__A , **__A ) SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = share_encoders SCREAMING_SNAKE_CASE_ = projection_dim
299
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any]=False ) -> Tuple: """simple docstring""" try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value a__ : str =parse_flag_from_env('''RUN_SLOW''', default=False) a__ : Union[str, Any] =parse_flag_from_env('''RUN_REMOTE''', default=False) a__ : List[str] =parse_flag_from_env('''RUN_LOCAL''', default=True) a__ : Optional[int] =parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression a__ : Any =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') a__ : Optional[int] =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') a__ : List[str] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio a__ : Any =pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam a__ : Tuple =pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility a__ : Union[str, Any] =pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows a__ : int =pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase__ ( __lowercase : Optional[Any] ) -> Optional[int]: """simple docstring""" try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires faiss' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Any: """simple docstring""" try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires regex' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Tuple ) -> List[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires elasticsearch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires sqlalchemy' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[str] ) -> List[str]: """simple docstring""" if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip('test requires PyTorch' )(__lowercase ) return test_case def 
lowercase__ ( __lowercase : Optional[Any] ) -> List[str]: """simple docstring""" if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip('test requires TensorFlow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : int ) -> Union[str, Any]: """simple docstring""" if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip('test requires JAX' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> Optional[Any]: """simple docstring""" if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip('test requires Pillow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : int ) -> int: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> int: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> Any: """simple docstring""" def _require_spacy_model(__lowercase : Any ): try: import spacy # noqa F401 spacy.load(__lowercase ) except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(__lowercase ) )(__lowercase ) else: return test_case return _require_spacy_model def lowercase__ ( __lowercase : Union[str, Any] ) -> str: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Optional[Any]: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip('test is slow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip('test is local' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip('test is packaged' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Any: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip('test requires remote' )(__lowercase ) return test_case def lowercase__ ( *__lowercase : Optional[Any] ) -> Tuple: """simple docstring""" def decorate(cls : int ): for name, fn in cls.__dict__.items(): if callable(__lowercase ) and name.startswith('test' ): for decorator in decorators: __UpperCamelCase = decorator(__lowercase ) setattr(cls , __lowercase , __lowercase ) return cls return decorate class snake_case ( __lowerCamelCase ): """simple docstring""" pass class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =0 SCREAMING_SNAKE_CASE_ : List[Any] =1 SCREAMING_SNAKE_CASE_ : Union[str, Any] =2 @contextmanager def lowercase__ ( 
__lowercase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , __lowercase : Dict=1e-16 ) -> List[Any]: """simple docstring""" __UpperCamelCase = requests.Session().request def timeout_request(__lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , **__lowercase : List[str] ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) __UpperCamelCase = timeout try: return online_request(__lowercase , __lowercase , **__lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__lowercase : int , __lowercase : List[str] , **__lowercase : Union[str, Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=__lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , __lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , __lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , __lowercase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def lowercase__ ( *__lowercase : Any , **__lowercase : Dict ) -> Dict: """simple docstring""" __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__lowercase , **__lowercase ) as tmp_dir: try: os.chdir(__lowercase ) yield finally: os.chdir(__lowercase ) @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase__ ( __lowercase : List[str] , __lowercase : int ) -> Union[str, Any]: """simple docstring""" return deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__lowercase : List[Any] , *__lowercase : Tuple , **__lowercase : Union[str, Any] ): try: return func(*__lowercase , **__lowercase ) except HTTPError as err: if str(__lowercase ).startswith('500' ) or str(__lowercase ).startswith('502' ): pytest.xfail(str(__lowercase ) ) raise err return decorator.decorator(_wrapper , __lowercase ) class snake_case : """simple docstring""" def __init__( self : int , __A : Any , __A : str , __A : List[Any] ): __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def lowercase__ ( __lowercase : Any , __lowercase : Optional[int] ) -> str: """simple docstring""" while True: __UpperCamelCase = await stream.readline() if line: callback(__lowercase ) else: break async def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : Any=None , __lowercase : Optional[Any]=None , __lowercase : int=False , __lowercase : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(__lowercase ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Tuple="" ): __UpperCamelCase = line.decode('utf-8' ).rstrip() sink.append(__lowercase ) if not quiet: print(__lowercase , __lowercase , file=__lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __lowercase : tee(__lowercase , __lowercase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda __lowercase : tee(__lowercase , __lowercase , sys.stderr , label='stderr:' ) ), ] , timeout=__lowercase , ) return _RunOutput(await p.wait() , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : int=180 , __lowercase : int=False , __lowercase : str=True ) -> _RunOutput: """simple docstring""" __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) ) __UpperCamelCase = ' '.join(__lowercase ) if result.returncode > 0: __UpperCamelCase = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCamelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __UpperCamelCase = re.sub(R'^gw' , '' , __lowercase , 0 , re.M ) return int(__lowercase ) def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = 29500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
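A short worked example of the xdist port helpers at the end of the file, assuming only what their own code states:

# With PYTEST_XDIST_WORKER="gw3", re.sub strips the "gw" prefix, leaving worker
# id 3, so the distributed-training port becomes 29500 + 3 = 29503; the default
# worker "gw0" yields the base port 29500.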
53
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase: List[str] = logging.get_logger(__name__) lowerCAmelCase: Optional[int] = { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''', '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''', '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''', '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''', } class a__( __lowerCamelCase ): lowercase__ = "funnel" lowercase__ = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self : int , __snake_case : Any=3_05_22 , __snake_case : List[Any]=[4, 4, 4] , __snake_case : str=None , __snake_case : Optional[int]=2 , __snake_case : Tuple=7_68 , __snake_case : Dict=12 , __snake_case : Optional[int]=64 , __snake_case : Dict=30_72 , __snake_case : Optional[int]="gelu_new" , __snake_case : Any=0.1 , __snake_case : int=0.1 , __snake_case : Union[str, Any]=0.0 , __snake_case : Optional[Any]=0.1 , __snake_case : Tuple=None , __snake_case : Optional[int]=1e-9 , __snake_case : Any="mean" , __snake_case : List[str]="relative_shift" , __snake_case : List[Any]=True , __snake_case : List[str]=True , __snake_case : List[str]=True , **__snake_case : Optional[int] , ): a : Optional[Any] = vocab_size a : Optional[Any] = block_sizes a : List[str] = [1] * len(__A ) if block_repeats is None else block_repeats assert len(__A ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." a : Optional[int] = num_decoder_layers a : List[str] = d_model a : int = n_head a : List[Any] = d_head a : Optional[Any] = d_inner a : Dict = hidden_act a : List[Any] = hidden_dropout a : str = attention_dropout a : Tuple = activation_dropout a : Dict = initializer_range a : Optional[int] = initializer_std a : Dict = layer_norm_eps assert pooling_type in [ "mean", "max", ], F"""Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.""" a : int = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F"""Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.""" a : List[str] = attention_type a : int = separate_cls a : Dict = truncate_seq a : str = pool_q_only super().__init__(**__A ) @property def lowercase_ ( self : int ): return sum(self.block_sizes ) @num_hidden_layers.setter def lowercase_ ( self : List[Any] , __snake_case : List[Any] ): raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. 
Please set `block_sizes`.' ) @property def lowercase_ ( self : Optional[Any] ): return len(self.block_sizes ) @num_blocks.setter def lowercase_ ( self : Any , __snake_case : int ): raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
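A small worked note on the derived properties above, using the defaults from __init__:

# With the default block_sizes=[4, 4, 4], the num_hidden_layers property returns
# sum(self.block_sizes) == 12 and the num_blocks property returns
# len(self.block_sizes) == 3; both setters intentionally raise
# NotImplementedError and direct callers to set block_sizes instead.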
297
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a__ : Tuple ='''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
53
0
"""simple docstring""" def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int: assert isinstance(__lowercase ,__lowercase ), f"The input value of [n={number}] is not an integer" if number == 1: return 2 elif number < 1: _lowerCAmelCase : Tuple = f"The input value of [n={number}] has to be > 0" raise ValueError(__lowercase ) else: _lowerCAmelCase : Union[str, Any] = sylvester(number - 1 ) _lowerCAmelCase : List[str] = num - 1 _lowerCAmelCase : Any = num return lower * upper + 1 if __name__ == "__main__": print(F"""The 8th number in Sylvester\'s sequence: {sylvester(8)}""")
44
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]: """simple docstring""" if split_mlp_wi: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __UpperCamelCase = (wi_a, wi_a) else: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str: """simple docstring""" return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) __UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , __lowercase ) __UpperCamelCase = collections.OrderedDict() # Shared embeddings. __UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , __lowercase , 'encoder' ).T __UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'encoder' ).T __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 2 (MLP). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T __UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int: """simple docstring""" __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) __UpperCamelCase = state_dict['shared.weight'] return state_dict def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase ) __UpperCamelCase = convert_tax_to_pytorch( __lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase ) __UpperCamelCase = make_state_dict(__lowercase , __lowercase ) model.load_state_dict(__lowercase , strict=__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]: """simple docstring""" __UpperCamelCase = MTaConfig.from_json_file(__lowercase ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCamelCase = UMTaEncoderModel(__lowercase ) else: __UpperCamelCase = UMTaForConditionalGeneration(__lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(__lowercase ) print('Done' ) if __name__ == "__main__": a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) a__ : List[str] =parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
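A hedged usage sketch of the converter's CLI; the script filename and paths are placeholders, and only the flag names come from the argparse block above (note the parser defines --t5x_checkpoint_path while the obfuscated call site reads args.tax_checkpoint_path):

# Hypothetical invocation:
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path ./t5x_model/checkpoint_1000000 \
#       --config_file ./t5_config.json \
#       --pytorch_dump_path ./converted_t5 \
#       --scalable_attention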
53
0
"""simple docstring""" A: List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} A: Optional[int] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _snake_case ( UpperCamelCase : dict[int, list[int]] , UpperCamelCase : int , UpperCamelCase : list[bool] ): UpperCAmelCase : Optional[Any] = True UpperCAmelCase : List[Any] = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(__lowercase , __lowercase , __lowercase ) order.append(__lowercase ) return order def _snake_case ( UpperCamelCase : dict[int, list[int]] , UpperCamelCase : int , UpperCamelCase : list[bool] ): UpperCAmelCase : int = True UpperCAmelCase : Any = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(__lowercase , __lowercase , __lowercase ) return component def _snake_case ( UpperCamelCase : dict[int, list[int]] ): UpperCAmelCase : List[str] = len(__lowercase ) * [False] UpperCAmelCase : Optional[Any] = {vert: [] for vert in range(len(__lowercase ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(__lowercase ) UpperCAmelCase : Any = [] for i, was_visited in enumerate(__lowercase ): if not was_visited: order += topology_sort(__lowercase , __lowercase , __lowercase ) UpperCAmelCase : Dict = [] UpperCAmelCase : Optional[int] = len(__lowercase ) * [False] for i in range(len(__lowercase ) ): UpperCAmelCase : List[Any] = order[len(__lowercase ) - i - 1] if not visited[vert]: UpperCAmelCase : Optional[Any] = find_components(__lowercase , __lowercase , __lowercase ) components_list.append(__lowercase ) return components_list
109
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : List[Any] ="BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Optional[int] =("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , __A : Optional[int] , __A : List[Any] ): __UpperCamelCase = False super().__init__(__A , __A ) __UpperCamelCase = self.image_processor def __call__( self : List[Any] , __A : ImageInput = None , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ): if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: __UpperCamelCase = self.tokenizer __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding # add pixel_values __UpperCamelCase = self.image_processor(__A , return_tensors=__A ) if text is not None: __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) else: __UpperCamelCase = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def _lowerCamelCase ( self : List[Any] , *__A : Dict , **__A : Optional[int] ): return self.tokenizer.batch_decode(*__A , **__A ) def _lowerCamelCase ( self : List[Any] , *__A : List[str] , **__A : Dict ): return self.tokenizer.decode(*__A , **__A ) @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.tokenizer.model_input_names __UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
53
0
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--",
    "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..",
    "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...",
    ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.", "?": "..--..",
    "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.",
    ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    '''simple docstring'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    '''simple docstring'''
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    '''simple docstring'''
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
12
'''simple docstring'''
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    """simple docstring"""

    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
53
0
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : int = logging.get_logger(__name__) def A ( _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : List[str] = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''), ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''), ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''), ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''), ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''), ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''), ] ) return rename_keys def A ( _lowercase , _lowercase ): for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[ : encoder_config.hidden_size, : ] SCREAMING_SNAKE_CASE : int = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE : Dict = in_proj_weight[ -encoder_config.hidden_size :, : ] def A ( _lowercase , _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : Optional[int] = dct.pop(__lowercase ) SCREAMING_SNAKE_CASE : List[str] = val def A ( _lowercase ): if "handwritten" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" 
in checkpoint_url or "stage1" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg''' SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' ) return im @torch.no_grad() def A ( _lowercase , _lowercase ): SCREAMING_SNAKE_CASE : int = ViTConfig(image_size=384 , qkv_bias=__lowercase ) SCREAMING_SNAKE_CASE : Dict = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: SCREAMING_SNAKE_CASE : Tuple = 768 elif "large" in checkpoint_url: # use ViT-large encoder SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024 SCREAMING_SNAKE_CASE : int = 4_096 SCREAMING_SNAKE_CASE : Any = 24 SCREAMING_SNAKE_CASE : str = 16 SCREAMING_SNAKE_CASE : str = 1_024 else: raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : str = '''relu''' SCREAMING_SNAKE_CASE : List[str] = 1_024 SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = False # load HuggingFace model SCREAMING_SNAKE_CASE : Any = ViTModel(__lowercase , add_pooling_layer=__lowercase ) SCREAMING_SNAKE_CASE : str = TrOCRForCausalLM(__lowercase ) SCREAMING_SNAKE_CASE : Any = VisionEncoderDecoderModel(encoder=__lowercase , decoder=__lowercase ) model.eval() # load state_dict of original model, rename some keys SCREAMING_SNAKE_CASE : Optional[Any] = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' , check_hash=__lowercase )['''model'''] SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): SCREAMING_SNAKE_CASE : int = state_dict.pop(__lowercase ) if key.startswith('''decoder''' ) and "output_projection" not in key: SCREAMING_SNAKE_CASE : str = val else: SCREAMING_SNAKE_CASE : Optional[Any] = val # load state dict model.load_state_dict(__lowercase ) # Check outputs on an image SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor(size=encoder_config.image_size ) SCREAMING_SNAKE_CASE : Optional[int] = RobertaTokenizer.from_pretrained('''roberta-large''' ) SCREAMING_SNAKE_CASE : Optional[int] = TrOCRProcessor(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE : int = processor(images=prepare_img(__lowercase ) , return_tensors='''pt''' ).pixel_values # verify logits SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) SCREAMING_SNAKE_CASE : str = model(pixel_values=__lowercase , decoder_input_ids=__lowercase ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size([1, 1, 50_265] ) if "trocr-base-handwritten" in checkpoint_url: SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] ) elif "trocr-large-handwritten" in checkpoint_url: 
SCREAMING_SNAKE_CASE : List[str] = torch.tensor( [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] ) elif "trocr-base-printed" in checkpoint_url: SCREAMING_SNAKE_CASE : Any = torch.tensor( [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] ) elif "trocr-large-printed" in checkpoint_url: SCREAMING_SNAKE_CASE : int = torch.tensor( [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] , __lowercase , atol=1e-3 ), "First elements of logits not as expected" Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__lowercase ) if __name__ == "__main__": __UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __UpperCamelCase : str = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
182
'''simple docstring'''
alphabet_size = 256
modulus = 1_000_003  # Modulus to hash a string


def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """simple docstring"""
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
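# Hedged illustration (added; not from the original module): the rolling-hash update
# above is plain base-256 arithmetic mod 1_000_003 -- dropping the leading character
# and appending the next one reproduces the hash computed from scratch.
def _rolling_hash_demo() -> None:
    text, p_len = "abcd", 3
    h = 0
    for ch in text[:p_len]:  # hash of "abc"
        h = (ord(ch) + h * 256) % 1_000_003
    power = pow(256, p_len - 1, 1_000_003)
    rolled = ((h - ord(text[0]) * power) * 256 + ord(text[3])) % 1_000_003
    fresh = 0
    for ch in text[1:4]:  # hash of "bcd", computed directly
        fresh = (ord(ch) + fresh * 256) % 1_000_003
    assert rolled == fresh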
53
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = '''▁''' _A = {'''vocab_file''': '''sentencepiece.bpe.model'''} _A = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } _A = { '''facebook/xglm-564M''': 2048, } class UpperCAmelCase__ ( __lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_ = None , **A_ , ) -> Optional[int]: __UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer __UpperCamelCase =7 __UpperCamelCase =[f'<madeupword{i}>' for i in range(self.num_madeup_words )] __UpperCamelCase =kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) __UpperCamelCase =vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCamelCase =1 # Mimic fairseq token-to-id alignment for the first 4 token __UpperCamelCase ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} __UpperCamelCase =len(self.sp_model ) __UpperCamelCase ={f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__A ) __UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> int: __UpperCamelCase =self.__dict__.copy() __UpperCamelCase =None __UpperCamelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , A_ ) -> Any: __UpperCamelCase =d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase ={} __UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _a ( self , A_ , A_ = None ) -> Tuple: if token_ids_a is None: return [self.sep_token_id] + token_ids_a __UpperCamelCase =[self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _a ( self , A_ , A_ = None , A_ = False ) -> Union[str, Any]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) def _a ( self , A_ , A_ = None ) -> List[str]: __UpperCamelCase =[self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def _a ( self ) -> int: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def _a ( self ) -> int: __UpperCamelCase ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _a ( self , A_ ) -> int: return self.sp_model.encode(__A , out_type=__A ) def _a ( self , A_ ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCamelCase =self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _a ( self , A_ ) -> Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _a ( self , A_ ) -> Dict: __UpperCamelCase =''.join(__A ).replace(__A , ' ' ).strip() return out_string def _a ( self , A_ , A_ = None ) -> Tuple: if not os.path.isdir(__A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCamelCase =os.path.join( __A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , 'wb' ) as fi: __UpperCamelCase =self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,)
62
'''simple docstring''' from __future__ import annotations class snake_case : """simple docstring""" def __init__( self : Optional[int] , __A : list[list[int]] ): __UpperCamelCase = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(__A ) != 0: __UpperCamelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(__A ) != cols: raise error for value in row: if not isinstance(__A , (int, float) ): raise error __UpperCamelCase = rows else: __UpperCamelCase = [] def _lowerCamelCase ( self : int ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowerCamelCase ( self : str ): return len(self.rows ) @property def _lowerCamelCase ( self : Any ): return len(self.rows[0] ) @property def _lowerCamelCase ( self : Optional[Any] ): return (self.num_rows, self.num_columns) @property def _lowerCamelCase ( self : Dict ): return self.order[0] == self.order[1] def _lowerCamelCase ( self : Any ): __UpperCamelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Any ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowerCamelCase ( self : List[str] ): return bool(self.determinant() ) def _lowerCamelCase ( self : Dict , __A : int , __A : int ): __UpperCamelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(__A ).determinant() def _lowerCamelCase ( self : Dict , __A : int , __A : int ): if (row + column) % 2 == 0: return self.get_minor(__A , __A ) return -1 * self.get_minor(__A , __A ) def _lowerCamelCase ( self : List[str] ): return Matrix( [ [self.get_minor(__A , __A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowerCamelCase ( self : Union[str, Any] ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self : Optional[Any] ): return str(self.rows ) def __str__( self : Union[str, Any] ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(__A ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def _lowerCamelCase ( self : List[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in row: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(__A ) else: __UpperCamelCase = self.rows[0:position] + [row] + self.rows[position:] def _lowerCamelCase ( self : Optional[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in column: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: __UpperCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __UpperCamelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : Tuple , __A : object ): if not isinstance(__A , __A ): return NotImplemented return self.rows == other.rows def __ne__( self : Any , __A : object ): return not self == other def __neg__( self : List[Any] ): return self * -1 def __add__( self : List[str] , __A : Matrix ): if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : str , __A : Matrix ): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self : str , __A : Matrix | int | float ): if isinstance(__A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(__A , __A ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(__A , __A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self : Union[str, Any] , __A : int ): if not isinstance(__A , __A ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) __UpperCamelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowerCamelCase ( cls : Tuple , __A : list[int] , __A : list[int] ): return sum(row[i] * column[i] for i in range(len(__A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
53
0
'''simple docstring'''


def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
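# Quick sanity check (added for illustration): the digit-by-digit reversal above
# maps 121 -> 121 and 123 -> 321, and negative numbers are rejected outright.
assert is_palindrome(121) and not is_palindrome(123) and not is_palindrome(-11)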
63
'''simple docstring''' import os import numpy import onnx def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> Dict: """simple docstring""" __UpperCamelCase = a.name __UpperCamelCase = b.name __UpperCamelCase = '' __UpperCamelCase = '' __UpperCamelCase = a == b __UpperCamelCase = name_a __UpperCamelCase = name_b return res def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : List[Any] ) -> Optional[int]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(__lowercase , __lowercase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) _graph_replace_input_with(node_proto.attribute[1].g , __lowercase , __lowercase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) def lowercase__ ( __lowercase : int , __lowercase : List[Any] , __lowercase : Dict ) -> int: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(__lowercase , __lowercase , __lowercase ) def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : str ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __UpperCamelCase = inits[i].name __UpperCamelCase = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = os.path.dirname(__lowercase ) __UpperCamelCase = os.path.basename(__lowercase ) __UpperCamelCase = onnx.load(os.path.join(__lowercase , __lowercase ) ) __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = set() __UpperCamelCase = {} __UpperCamelCase = [] __UpperCamelCase = 0 for i in range(len(__lowercase ) ): if i in dup_set: continue for j in range(i + 1 , len(__lowercase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(__lowercase ) dup_set.add(__lowercase ) __UpperCamelCase = inits[j].data_type __UpperCamelCase = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , __lowercase ) total_reduced_size += mem_size __UpperCamelCase = inits[i].name __UpperCamelCase = inits[j].name if name_i in dup_map: dup_map[name_i].append(__lowercase ) else: __UpperCamelCase = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) __UpperCamelCase = sorted(__lowercase ) _remove_dup_initializers_from_model(__lowercase , __lowercase , __lowercase ) __UpperCamelCase = 'optimized_' + model_file_name __UpperCamelCase = os.path.join(__lowercase , __lowercase ) onnx.save(__lowercase , __lowercase ) return new_model
53
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
234
'''simple docstring'''
import random


def _partition(data: list, pivot) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """simple docstring"""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
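# Illustrative use (added; not in the original file): quick_select returns the k-th
# smallest element (0-indexed), so index 0 is the minimum. The random pivot only
# changes the recursion path, not the answer.
assert quick_select([2, 4, 5, 7, 899, 54, 32], 5) == 54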
53
0
'''simple docstring'''


def add(first: int, second: int) -> int:
    """simple docstring"""
    while second != 0:
        carry = first & second  # bits that overflow
        first ^= second         # add without carrying
        second = carry << 1     # carry shifted into place
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'{add(first, second) = }')
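# Illustrative trace (added; not part of the original module): XOR sums bits without
# carrying and AND<<1 propagates the carry, so for add(5, 3) the pair (first, second)
# goes (5, 3) -> (6, 2) -> (4, 4) -> (0, 8) -> (8, 0) and the loop returns 8.
assert 5 ^ 3 == 6 and (5 & 3) << 1 == 2  # first step of the trace above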
344
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowercase__ ( __lowercase : Any ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def lowercase__ ( __lowercase : Tuple ) -> int: """simple docstring""" __UpperCamelCase , __UpperCamelCase = emb.weight.shape __UpperCamelCase = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) __UpperCamelCase = emb.weight.data return lin_layer def lowercase__ ( __lowercase : int , __lowercase : List[str]="facebook/mbart-large-en-ro" , __lowercase : str=False , __lowercase : List[Any]=False ) -> int: """simple docstring""" __UpperCamelCase = torch.load(__lowercase , map_location='cpu' )['model'] remove_ignore_keys_(__lowercase ) __UpperCamelCase = state_dict['encoder.embed_tokens.weight'].shape[0] __UpperCamelCase = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase ) if mbart_aa and finetuned: __UpperCamelCase = 'relu' __UpperCamelCase = state_dict['decoder.embed_tokens.weight'] __UpperCamelCase = MBartForConditionalGeneration(__lowercase ) model.model.load_state_dict(__lowercase ) if finetuned: __UpperCamelCase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a__ : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') a__ : Union[str, Any] =parser.parse_args() a__ : str =convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
53
0
'''simple docstring'''
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id', ['canonical_dataset_name', 'org-name/dataset-name'])
@pytest.mark.parametrize('path', ['filename.csv', 'filename with blanks.csv'])
@pytest.mark.parametrize('revision', [None, 'v2'])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
151
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : Any , __A : Dict , __A : str , __A : List[Any]=1_0_2_4 , __A : Tuple=1_0_2_4 , __A : str=3.6 ): __UpperCamelCase = tokenizer __UpperCamelCase = tokenizer.bos_token_id __UpperCamelCase = dataset __UpperCamelCase = seq_length __UpperCamelCase = seq_length * chars_per_token * num_of_sequences def __iter__( self : Any ): __UpperCamelCase = iter(self.dataset ) __UpperCamelCase = True while more_examples: __UpperCamelCase , __UpperCamelCase = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(__A )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: __UpperCamelCase = False break __UpperCamelCase = tokenizer(__A , truncation=__A )['input_ids'] __UpperCamelCase = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(__A ) , self.seq_length ): __UpperCamelCase = all_token_ids[i : i + self.seq_length] if len(__A ) == self.seq_length: yield torch.tensor(__A ) def lowercase__ ( __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = {'streaming': True} __UpperCamelCase = load_dataset(args.dataset_name , split='train' , **__lowercase ) __UpperCamelCase = ConstantLengthDataset(__lowercase , __lowercase , seq_length=args.seq_length ) __UpperCamelCase = DataLoader(__lowercase , batch_size=args.batch_size ) return eval_dataloader def lowercase__ ( __lowercase : Tuple ) -> Optional[Any]: """simple docstring""" model.eval() __UpperCamelCase = [] for step, batch in enumerate(__lowercase ): with torch.no_grad(): __UpperCamelCase = model(__lowercase , labels=__lowercase ) __UpperCamelCase = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__lowercase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __UpperCamelCase = torch.mean(torch.cat(__lowercase ) ) try: __UpperCamelCase = torch.exp(__lowercase ) except OverflowError: __UpperCamelCase = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator a__ : int =Accelerator() # Parse configuration a__ : Dict =HfArgumentParser(EvaluationArguments) a__ : Union[str, Any] =parser.parse_args() set_seed(args.seed) # Logging a__ : List[Any] =logging.getLogger(__name__) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) # Load model and tokenizer a__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt) a__ : List[Any] =AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader a__ : Union[str, Any] =create_dataloader(args) # Prepare everything with our `accelerator`. a__ , a__ : List[str] =accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('''Evaluating and saving model after training''') a__ , a__ : Any =evaluate(args) logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
53
0
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.02 , _A=4 , ) -> Tuple: SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_token_type_ids SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = type_vocab_size SCREAMING_SNAKE_CASE_ = type_sequence_label_size SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = num_choices def _UpperCamelCase ( self ) -> int: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCamelCase ( self ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class UpperCamelCase__ ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ =True UpperCAmelCase_ =( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCamelCase ( self ) -> int: SCREAMING_SNAKE_CASE_ = FlaxRoFormerModelTester(self ) @slow def _UpperCamelCase ( self ) -> str: for model_class_name in self.all_model_classes: 
SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__A ) SCREAMING_SNAKE_CASE_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A ) @require_flax class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase ( self ) -> str: SCREAMING_SNAKE_CASE_ = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) SCREAMING_SNAKE_CASE_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE_ = model(__A )[0] SCREAMING_SNAKE_CASE_ = 50000 SCREAMING_SNAKE_CASE_ = (1, 6, vocab_size) self.assertEqual(output.shape , __A ) SCREAMING_SNAKE_CASE_ = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
299
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a__ : Any =logging.get_logger(__name__) a__ : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo" SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"] SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ): __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = hidden_size __UpperCamelCase = num_layers __UpperCamelCase = num_heads __UpperCamelCase = intermediate_size __UpperCamelCase = window_size __UpperCamelCase = activation_function __UpperCamelCase = resid_dropout __UpperCamelCase = embed_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = classifier_dropout __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id __UpperCamelCase = attention_types __UpperCamelCase = self.expand_attention_types_params(__A ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=__A , eos_token_id=__A , **__A ) @staticmethod def _lowerCamelCase ( __A : Tuple ): __UpperCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any: """simple docstring""" import torch __UpperCamelCase = input.size() __UpperCamelCase = len(__lowercase ) __UpperCamelCase = shape[dimension] __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase ) __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1 __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None] __UpperCamelCase = [slice(__lowercase )] * rank __UpperCamelCase = indices __UpperCamelCase = input[s] __UpperCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" import torch __UpperCamelCase = torch.arange(1 , __lowercase ) __UpperCamelCase = torch.remainder(__lowercase , __lowercase ) __UpperCamelCase = remainders == 0 __UpperCamelCase = candidates[divisor_indices] __UpperCamelCase = torch.max(__lowercase ) return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' ) class snake_case ( __lowerCamelCase ): """simple docstring""" @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def _lowerCamelCase ( self : int ): return self._config.num_heads def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ): __UpperCamelCase = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs['attention_mask'] if self.use_past: __UpperCamelCase = ordered_inputs['attention_mask'].dtype __UpperCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def _lowerCamelCase ( self : Dict ): return 1_3
53
0
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCAmelCase: Any = logging.get_logger(__name__) lowerCAmelCase: Optional[Any] = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class a__( __lowerCamelCase ): lowercase__ = "gpt_neo" lowercase__ = ["past_key_values"] lowercase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , __snake_case : Union[str, Any]=5_02_57 , __snake_case : Any=20_48 , __snake_case : Optional[Any]=20_48 , __snake_case : Any=24 , __snake_case : Union[str, Any]=[[["global", "local"], 12]] , __snake_case : str=16 , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=2_56 , __snake_case : Any="gelu_new" , __snake_case : Dict=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : int=0.0 , __snake_case : List[str]=0.1 , __snake_case : Any=1e-5 , __snake_case : int=0.02 , __snake_case : List[str]=True , __snake_case : Tuple=5_02_56 , __snake_case : Optional[Any]=5_02_56 , **__snake_case : Optional[Any] , ): a : List[Any] = vocab_size a : Tuple = max_position_embeddings a : Union[str, Any] = hidden_size a : Dict = num_layers a : int = num_heads a : Tuple = intermediate_size a : int = window_size a : Dict = activation_function a : Union[str, Any] = resid_dropout a : str = embed_dropout a : Optional[int] = attention_dropout a : Union[str, Any] = classifier_dropout a : int = layer_norm_epsilon a : int = initializer_range a : int = use_cache a : Optional[Any] = bos_token_id a : List[Any] = eos_token_id a : Any = attention_types a : Optional[Any] = self.expand_attention_types_params(__A ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """ F"""`config.num_layers = {self.num_layers}`. """ '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=__A , eos_token_id=__A , **__A ) @staticmethod def lowercase_ ( __snake_case : Tuple ): a : Union[str, Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase__ ( _A , _A , _A , _A ): import torch a : Any = input.size() a : Union[str, Any] = len(__lowercase ) a : Any = shape[dimension] a : Any = torch.arange(0 , __lowercase , __lowercase ) a : str = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1 a : str = torch.arange(__lowercase ) + low_indices[:min_length][:, None] a : Optional[int] = [slice(__lowercase )] * rank a : Any = indices a : List[Any] = input[s] a : Optional[Any] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowercase ) def lowerCamelCase__ ( _A , _A ): import torch a : Union[str, Any] = torch.arange(1 , __lowercase ) a : Tuple = torch.remainder(__lowercase , __lowercase ) a : List[str] = remainders == 0 a : Optional[int] = candidates[divisor_indices] a : Optional[int] = torch.max(__lowercase ) return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' ) class a__( __lowerCamelCase ): @property def lowercase_ ( self : Tuple ): a : Any = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) a : List[str] = {0: 'batch', 1: 'past_sequence + sequence'} else: a : Optional[int] = {0: 'batch', 1: 'sequence'} return common_inputs @property def lowercase_ ( self : int ): return self._config.num_heads def lowercase_ ( self : List[str] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , ): a : Dict = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() a : str = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch a , a : Union[str, Any] = common_inputs['input_ids'].shape # Not using the same length for past_key_values a : Any = seqlen + 2 a : List[str] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) a : Any = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] a : Optional[int] = common_inputs['attention_mask'] if self.use_past: a : Any = ordered_inputs['attention_mask'].dtype a : Any = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def lowercase_ ( self : Dict ): return 13
297
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa" SCREAMING_SNAKE_CASE_ : Dict =( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) SCREAMING_SNAKE_CASE_ : List[str] ="document_qa" SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"] SCREAMING_SNAKE_CASE_ : Any =["text"] def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ): if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*__A , **__A ) def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ): __UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' __UpperCamelCase = task_prompt.replace('{user_input}' , __A ) __UpperCamelCase = self.pre_processor.tokenizer( __A , add_special_tokens=__A , return_tensors='pt' ).input_ids __UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ): return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences def _lowerCamelCase ( self : Tuple , __A : List[Any] ): __UpperCamelCase = self.pre_processor.batch_decode(__A )[0] __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) __UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) __UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip() # remove first task start token __UpperCamelCase = self.pre_processor.tokenajson(__A ) return sequence["answer"]
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _a : List[str] = { '''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''], '''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Tuple = [ '''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AdaptiveEmbedding''', '''TransfoXLForSequenceClassification''', '''TransfoXLLMHeadModel''', '''TransfoXLModel''', '''TransfoXLPreTrainedModel''', '''load_tf_weights_in_transfo_xl''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a : Tuple = [ '''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFAdaptiveEmbedding''', '''TFTransfoXLForSequenceClassification''', '''TFTransfoXLLMHeadModel''', '''TFTransfoXLMainLayer''', '''TFTransfoXLModel''', '''TFTransfoXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys _a : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring""" def _snake_case ( UpperCamelCase : int = 10 ): if not isinstance(__lowercase , __lowercase ) or n < 0: raise ValueError("""Invalid input""" ) UpperCAmelCase : Dict = 10**n UpperCAmelCase : Dict = 28433 * (pow(2 , 7830457 , __lowercase )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f"""{solution(1_0) = }""")
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase__ ( __lowercase : Features ) -> Optional[int]: """simple docstring""" __UpperCamelCase = np.inf def set_batch_size(__lowercase : FeatureType ) -> None: nonlocal batch_size if isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary": __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__lowercase , __lowercase ) return None if batch_size is np.inf else batch_size class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ): super().__init__( __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , ) __UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths} __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] __UpperCamelCase = Parquet( cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , ) def _lowerCamelCase ( self : Optional[int] ): # Build iterable dataset if self.streaming: __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , ) __UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__A , in_memory=self.keep_in_memory ) return dataset class snake_case : """simple docstring""" def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ): __UpperCamelCase = dataset __UpperCamelCase = path_or_buf __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features ) __UpperCamelCase = parquet_writer_kwargs def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: __UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs ) else: __UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs ) return written def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ): __UpperCamelCase = 0 __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A ) __UpperCamelCase = self.dataset.features.arrow_schema 
__UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): __UpperCamelCase = query_table( table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__A ) written += batch.nbytes writer.close() return written
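# A minimal round-trip sketch for the reader/writer above (upstream names
# ParquetDatasetReader/ParquetDatasetWriter assumed; hypothetical driver code
# requiring `datasets` and `pyarrow`):
from datasets import Dataset

ds = Dataset.from_dict({"text": ["foo", "bar"], "label": [0, 1]})
ParquetDatasetWriter(ds, "tmp.parquet").write()        # Dataset -> Parquet file
reloaded = ParquetDatasetReader("tmp.parquet").read()  # Parquet file -> Dataset
assert reloaded.to_dict()["text"] == ds.to_dict()["text"]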
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """
    Wraps a learning-rate scheduler so it only steps when the wrapped optimizers
    actually stepped (relevant with gradient accumulation and mixed precision).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
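# Wrapping a vanilla PyTorch scheduler (hypothetical driver code; assumes an
# Accelerate GradientState/AcceleratorState have been initialized elsewhere):
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
wrapped = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=True)
# wrapped.step() advances the schedule only when gradients are being synced
# and no optimizer skipped its step (e.g. after a mixed-precision inf check).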
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # the deprecated `dataset_name` field is kept in the dict form for backward compatibility
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : List[str] = '''▁''' __UpperCamelCase : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} __UpperCamelCase : Any = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), } } __UpperCamelCase : Optional[int] = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off __UpperCamelCase : Optional[Any] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class lowercase__ ( __lowerCamelCase): UpperCamelCase_ = VOCAB_FILES_NAMES UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ = ["input_ids", "attention_mask"] UpperCamelCase_ = [] UpperCamelCase_ = [] def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any="<s>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : Optional[int]="<unk>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : Optional[int]="<mask>" , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Dict[str, Any]] = None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token SCREAMING_SNAKE_CASE : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , tokenizer_file=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , ) SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__A ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE : Tuple = 1 SCREAMING_SNAKE_CASE : str = len(self.sp_model ) SCREAMING_SNAKE_CASE : str = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A ) } SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.lang_code_to_id.items()} SCREAMING_SNAKE_CASE : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} SCREAMING_SNAKE_CASE : Tuple = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) SCREAMING_SNAKE_CASE : int = src_lang if src_lang is not None else '''en_XX''' SCREAMING_SNAKE_CASE : List[str] = self.lang_code_to_id[self._src_lang] SCREAMING_SNAKE_CASE : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.__dict__.copy() SCREAMING_SNAKE_CASE : Union[str, Any] = None SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] , UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def __A ( self : Union[str, Any] ): '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __A ( self : List[str] ): '''simple docstring''' return self._src_lang @src_lang.setter def __A ( self : Optional[int] , UpperCamelCase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) SCREAMING_SNAKE_CASE : Optional[int] = [1] * len(self.prefix_tokens ) SCREAMING_SNAKE_CASE : str = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__A )) + suffix_ones return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones def __A ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[str] ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) SCREAMING_SNAKE_CASE : List[Any] = src_lang SCREAMING_SNAKE_CASE : str = self(__A , add_special_tokens=__A , return_tensors=__A , **__A ) SCREAMING_SNAKE_CASE : Any = self.convert_tokens_to_ids(__A ) SCREAMING_SNAKE_CASE : Dict = tgt_lang_id return inputs def __A ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __A ( self : List[Any] , UpperCamelCase__ : str ): '''simple docstring''' return self.sp_model.encode(__A , out_type=__A ) def __A ( self : Optional[int] , UpperCamelCase__ : List[str] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[Any] = self.sp_model.PieceToId(__A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __A ( self : Dict , UpperCamelCase__ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __A ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ''''''.join(__A ).replace(__A , ''' ''' ).strip() return out_string def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join( __A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __A ) elif not os.path.isfile(self.vocab_file ): with open(__A , '''wb''' ) as fi: SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(__A ) return (out_vocab_file,) def __A ( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en_XX" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro_RO" , **UpperCamelCase__ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : str = tgt_lang return super().prepare_seqaseq_batch(__A , __A , **__A ) def __A ( self : Optional[Any] ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def __A ( self : Optional[Any] ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __A ( self : Dict , UpperCamelCase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.lang_code_to_id[src_lang] SCREAMING_SNAKE_CASE : Dict = [] SCREAMING_SNAKE_CASE : Optional[int] = [self.eos_token_id, self.cur_lang_code] def __A ( self : Tuple , UpperCamelCase__ 
: str ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.lang_code_to_id[lang] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = [self.eos_token_id, self.cur_lang_code]
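# A minimal translation-preprocessing sketch for the tokenizer above (upstream
# name MBartTokenizer assumed; hypothetical driver code needing Hub access):
tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# per set_src_lang_special_tokens above, the input_ids end with </s> followed
# by the en_XX language code rather than starting with it.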
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Circular convolution of two 1-D signals via the circulant-matrix method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list:
        """
        >>> CircularConvolution().circular_convolution()
        [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]

        # fill the smaller signal with zeros to make both signals the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated one step further
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # round off to two decimal places
        return [round(value, 2) for value in final_signal]


if __name__ == "__main__":
    doctest.testmod()
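# Cross-check of the circulant-matrix result against the FFT identity
# circular_conv(a, b) = ifft(fft(a) * fft(b)) (hypothetical driver code):
import numpy as np

cc = CircularConvolution()
via_fft = np.real(np.fft.ifft(np.fft.fft(cc.first_signal) * np.fft.fft(cc.second_signal)))
print(cc.circular_convolution())       # [10, 10, 6, 14]
print([round(v, 2) for v in via_fft])  # [10.0, 10.0, 6.0, 14.0]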
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a__ : str =logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str =["input_features", "attention_mask"] def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ): super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A ) __UpperCamelCase = num_mel_bins __UpperCamelCase = do_ceptral_normalize __UpperCamelCase = normalize_means __UpperCamelCase = normalize_vars __UpperCamelCase = True def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ): __UpperCamelCase = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers __UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 ) __UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: __UpperCamelCase = x[:input_length].mean(axis=0 ) __UpperCamelCase = np.subtract(__A , __A ) if normalize_vars: __UpperCamelCase = x[:input_length].std(axis=0 ) __UpperCamelCase = np.divide(__A , __A ) if input_length < x.shape[0]: __UpperCamelCase = padding_value # make sure array is in float32 __UpperCamelCase = x.astype(np.floataa ) return x def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ): __UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__A , __A ) ] def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __UpperCamelCase = is_batched_numpy or ( isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__A , np.ndarray ): __UpperCamelCase = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCamelCase = [raw_speech] # extract fbank features __UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech] # convert into correct format for padding __UpperCamelCase = BatchFeature({'input_features': features} ) __UpperCamelCase = self.pad( __A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , ) # make sure list is in array format __UpperCamelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , __A ): __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features] __UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: __UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __UpperCamelCase = ( np.array(__A , dtype=np.intaa ) if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD else None ) __UpperCamelCase = self.normalize( padded_inputs['input_features'] , attention_mask=__A ) if return_tensors is not None: __UpperCamelCase = padded_inputs.convert_to_tensors(__A ) return padded_inputs
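# A minimal feature-extraction sketch for the extractor above (upstream name
# Speech2TextFeatureExtractor assumed; hypothetical driver code; random audio
# stands in for real speech, and torchaudio must be installed):
import numpy as np

extractor = Speech2TextFeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # one second at 16 kHz
features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)  # (1, num_frames, 80) log-mel bins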
def odd_even_transposition(arr: list) -> list:
    """Sorts `arr` in place with odd-even (brick) transposition sort:
    alternate passes compare odd- and even-indexed neighbour pairs."""
    arr_size = len(arr)
    for pass_number in range(arr_size):
        for i in range(pass_number % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : List[Any] =logging.get_logger(__name__) a__ : List[Any] ={ '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model" def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = project_dim class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model" def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ): super().__init__(**__A ) __UpperCamelCase = hidden_size __UpperCamelCase = intermediate_size __UpperCamelCase = projection_dim __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = num_channels __UpperCamelCase = patch_size __UpperCamelCase = image_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = attention_dropout __UpperCamelCase = layer_norm_eps __UpperCamelCase = hidden_act @classmethod def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type' ) == "altclip": __UpperCamelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] ="altclip" SCREAMING_SNAKE_CASE_ : Optional[int] =True def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). __UpperCamelCase = kwargs.pop('text_config_dict' , __A ) __UpperCamelCase = kwargs.pop('vision_config_dict' , __A ) super().__init__(**__A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __UpperCamelCase = {} # This is the complete result when using `text_config_dict`. __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __UpperCamelCase = {} # This is the complete result when using `vision_config_dict`. __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __UpperCamelCase = { str(__A ): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __UpperCamelCase = {} logger.info('`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values.' ) if vision_config is None: __UpperCamelCase = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' ) __UpperCamelCase = AltCLIPTextConfig(**__A ) __UpperCamelCase = AltCLIPVisionConfig(**__A ) __UpperCamelCase = projection_dim __UpperCamelCase = logit_scale_init_value __UpperCamelCase = 1.0 @classmethod def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = copy.deepcopy(self.__dict__ ) __UpperCamelCase = self.text_config.to_dict() __UpperCamelCase = self.vision_config.to_dict() __UpperCamelCase = self.__class__.model_type return output
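# A minimal composition sketch for the three configs above (upstream names
# AltCLIPTextConfig/AltCLIPVisionConfig/AltCLIPConfig assumed; hypothetical
# driver code with deliberately tiny sizes):
text_cfg = AltCLIPTextConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
vision_cfg = AltCLIPVisionConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4)
cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=32)
print(cfg.to_dict()["text_config"]["hidden_size"])  # 64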
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Computes (a**n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# same check with Python's ** operator, which computes the full power (much slower):
print((a / b) % p == (a * b ** (p - 2)) % p)
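# Modular division built on the function above: for a prime p not dividing b,
# b**(p-2) is the inverse of b mod p (Fermat's little theorem), so a/b mod p
# is a * b**(p-2) mod p. (`mod_divide` is a hypothetical helper for illustration.)
def mod_divide(a: int, b: int, p: int) -> int:
    return (a % p) * binary_exponentiation(b, p - 2, p) % p


print(mod_divide(1_000_000_000, 10, 701))  # equals (1_000_000_000 // 10) % 701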
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Any ) -> Optional[Any]: """simple docstring""" with open(__lowercase ) as metadata_file: __UpperCamelCase = json.load(__lowercase ) __UpperCamelCase = LukeConfig(use_entity_aware_attention=__lowercase , **metadata['model_config'] ) # Load in the weights from the checkpoint_path __UpperCamelCase = torch.load(__lowercase , map_location='cpu' ) # Load the entity vocab file __UpperCamelCase = load_entity_vocab(__lowercase ) __UpperCamelCase = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks __UpperCamelCase = AddedToken('<ent>' , lstrip=__lowercase , rstrip=__lowercase ) __UpperCamelCase = AddedToken('<ent2>' , lstrip=__lowercase , rstrip=__lowercase ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(__lowercase ) with open(os.path.join(__lowercase , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(__lowercase , __lowercase ) __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase ) # Initialize the embeddings of the special tokens __UpperCamelCase = state_dict['embeddings.word_embeddings.weight'] __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 ) __UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 ) __UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __UpperCamelCase = F'''encoder.layer.{layer_index}.attention.self.''' __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] __UpperCamelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __UpperCamelCase = state_dict['entity_embeddings.entity_embeddings.weight'] __UpperCamelCase = entity_emb[entity_vocab['[MASK]']] __UpperCamelCase = LukeModel(config=__lowercase ).eval() __UpperCamelCase , __UpperCamelCase = model.load_state_dict(__lowercase , strict=__lowercase ) if not (len(__lowercase ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'''Missing keys {', '.join(__lowercase )}. Expected only missing embeddings.position_ids''' ) if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )): raise ValueError( 'Unexpected keys' F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' ) # Check outputs __UpperCamelCase = LukeTokenizer.from_pretrained(__lowercase , task='entity_classification' ) __UpperCamelCase = ( 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the' ' new world number one avoid a humiliating second- round exit at Wimbledon .' 
) __UpperCamelCase = (39, 42) __UpperCamelCase = tokenizer(__lowercase , entity_spans=[span] , add_prefix_space=__lowercase , return_tensors='pt' ) __UpperCamelCase = model(**__lowercase ) # Verify word hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 42, 1024) ) __UpperCamelCase = torch.tensor( [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] ) else: # base __UpperCamelCase = torch.Size((1, 42, 768) ) __UpperCamelCase = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": __UpperCamelCase = torch.Size((1, 1, 1024) ) __UpperCamelCase = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] ) else: # base __UpperCamelCase = torch.Size((1, 1, 768) ) __UpperCamelCase = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowercase , atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(__lowercase ) ) model.save_pretrained(__lowercase ) def lowercase__ ( __lowercase : Dict ) -> List[str]: """simple docstring""" __UpperCamelCase = {} with open(__lowercase , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(__lowercase ): __UpperCamelCase , __UpperCamelCase = line.rstrip().split('\t' ) __UpperCamelCase = index return entity_vocab if __name__ == "__main__": a__ : Any =argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) a__ : str =parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
'''simple docstring''' import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCAmelCase ( __lowerCamelCase, unittest.TestCase ): """simple docstring""" lowerCamelCase = LayoutLMTokenizer lowerCamelCase = LayoutLMTokenizerFast lowerCamelCase = True lowerCamelCase = True def UpperCAmelCase_ ( self ) -> Optional[Any]: super().setUp() A_ : int = [ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> List[Any]: return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__A ) def UpperCAmelCase_ ( self , _lowerCamelCase ) -> Dict: A_ : Union[str, Any] = """UNwant\u00E9d,running""" A_ : Union[str, Any] = """unwanted, running""" return input_text, output_text def UpperCAmelCase_ ( self ) -> List[Any]: A_ : Optional[Any] = self.tokenizer_class(self.vocab_file ) A_ : Optional[Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(__A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [7, 4, 5, 10, 8, 9] ) def UpperCAmelCase_ ( self ) -> Optional[Any]: pass
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class snake_case ( __lowerCamelCase ): """simple docstring""" def _lowerCamelCase ( self : Any ): __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = 8 # DPR tok __UpperCamelCase = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __UpperCamelCase = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __UpperCamelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) ) __UpperCamelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCamelCase = {'unk_token': '<unk>'} __UpperCamelCase = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(__A , exist_ok=__A ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__A ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__A ) ) def _lowerCamelCase ( self : Tuple ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Optional[int] ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def _lowerCamelCase ( self : Union[str, Any] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def _lowerCamelCase ( self : str ): shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , 
question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def _lowerCamelCase ( self : Any , __A : bool ): __UpperCamelCase = self.get_dummy_dataset() __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , ) if from_disk: __UpperCamelCase = os.path.join(self.tmpdirname , 'dataset' ) __UpperCamelCase = os.path.join(self.tmpdirname , 'index.faiss' ) dataset.get_index('embeddings' ).save(os.path.join(self.tmpdirname , 'index.faiss' ) ) dataset.drop_index('embeddings' ) dataset.save_to_disk(os.path.join(self.tmpdirname , 'dataset' ) ) del dataset __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , ) return retriever def _lowerCamelCase ( self : int ): __UpperCamelCase = Dataset.from_dict( { 'id': ['0', '1'], 'text': ['foo', 'bar'], 'title': ['Foo', 'Bar'], 'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('embeddings' , string_factory='Flat' , metric_type=faiss.METRIC_INNER_PRODUCT ) __UpperCamelCase = os.path.join(self.tmpdirname , 'hf_bert_base.hnswSQ8_correct_phi_128.c_index' ) dataset.save_faiss_index('embeddings' , index_file_name + '.index.dpr' ) pickle.dump(dataset['id'] , open(index_file_name + '.index_meta.dpr' , 'wb' ) ) __UpperCamelCase = os.path.join(self.tmpdirname , 'psgs_w100.tsv.pkl' ) __UpperCamelCase = {sample['id']: [sample['text'], sample['title']] for sample in dataset} pickle.dump(__A , open(__A , 'wb' ) ) __UpperCamelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , ) __UpperCamelCase = RagRetriever( __A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with 
patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset: __UpperCamelCase = self.get_dummy_dataset() retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Optional[Any] ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] ) self.assertEqual(len(doc_dicts[0]['id'] ) , __A ) self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_legacy_index_retriever() __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = retriever.retrieve(__A , n_docs=__A ) 
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(__A ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] ) self.assertEqual(len(doc_dicts[0]['text'] ) , __A ) self.assertEqual(doc_dicts[0]['text'][0] , 'bar' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['text'][0] , 'foo' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def _lowerCamelCase ( self : str ): __UpperCamelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(__A ) __UpperCamelCase = RagRetriever.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever.retrieve(__A , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Optional[Any] ): import torch __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_canonical_hf_index_retriever() __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , __A ) self.assertIsInstance(__A , np.ndarray ) __UpperCamelCase = retriever( __A , __A , prefix=retriever.config.generator.prefix , n_docs=__A , return_tensors='pt' , ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = ( # noqa: F841 out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'], out['doc_ids'], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) self.assertIsInstance(__A , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def _lowerCamelCase ( self : Any ): __UpperCamelCase = self.get_dpr_ctx_encoder_tokenizer() __UpperCamelCase = 1 __UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A ) retriever.set_ctx_encoder_tokenizer(__A ) __UpperCamelCase = [[5, 7], [1_0, 1_1]] __UpperCamelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __UpperCamelCase = retriever(__A , __A , prefix=retriever.config.generator.prefix , n_docs=__A ) self.assertEqual( len(__A ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , __A ) # check for doc token related keys in dictionary.
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


lowercase__ = logging.get_logger(__name__)


class A_ ( __lowerCamelCase ):
    '''simple docstring'''

    def __init__( self : List[str] , *lowercase_ : Optional[int] , **lowercase_ : str ) -> List[str]:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , __A , )
        super().__init__(*__A , **__A )
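The file above is the standard deprecation-shim pattern: the old class name subclasses the new one and warns on construction. A self-contained sketch of the same pattern with hypothetical class names:

# Minimal sketch of the deprecation-shim pattern used above (names are hypothetical).
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias that forwards everything to the new class."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            'OldFeatureExtractor is deprecated; use NewImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning, then behaves like NewImageProcessor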
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_timesformer'''] = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
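The `__init__` above defers heavy imports through `_LazyModule`. A stripped-down sketch of the same idea using only the standard library and module-level `__getattr__` (PEP 562); `_LazyModule` itself is more elaborate, but the mechanism is the same:

# Hedged sketch of lazy module loading; submodules are imported only on first
# attribute access instead of at package import time.
import importlib

_import_structure = {
    'configuration_timesformer': ['TimesformerConfig'],
    'modeling_timesformer': ['TimesformerModel'],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
    module = importlib.import_module(f'.{module_name}', __name__)
    return getattr(module, name)  # triggers the real import on first use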
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'''tokenizer_file''': '''tokenizer.json'''} __UpperCAmelCase = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class UpperCamelCase__ ( __lowerCamelCase ): """simple docstring""" UpperCAmelCase_ =VOCAB_FILES_NAMES UpperCAmelCase_ =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ =["input_ids", "attention_mask"] UpperCAmelCase_ =None def __init__( self , _A=None , _A=None , _A=None , _A="<unk>" , _A="<s>" , _A="</s>" , _A="<pad>" , _A=False , _A=False , **_A , ) -> str: super().__init__( __A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , add_prefix_space=__A , clean_up_tokenization_spaces=__A , **__A , ) SCREAMING_SNAKE_CASE_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __A ) != add_prefix_space: SCREAMING_SNAKE_CASE_ = getattr(__A , pre_tok_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE_ = add_prefix_space SCREAMING_SNAKE_CASE_ = pre_tok_class(**__A ) SCREAMING_SNAKE_CASE_ = add_prefix_space def _UpperCamelCase ( self , *_A , **_A ) -> List[Any]: SCREAMING_SNAKE_CASE_ = kwargs.get('''is_split_into_words''' , __A ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ''' pretokenized inputs.''' ) return super()._batch_encode_plus(*__A , **__A ) def _UpperCamelCase ( self , *_A , **_A ) -> Any: SCREAMING_SNAKE_CASE_ = kwargs.get('''is_split_into_words''' , __A ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ''' pretokenized inputs.''' ) return super()._encode_plus(*__A , **__A ) def _UpperCamelCase ( self , _A , _A = None ) -> Tuple: SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(__A , name=__A ) return tuple(__A ) def _UpperCamelCase ( self , _A ) -> Dict: SCREAMING_SNAKE_CASE_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] ) if len(__A ) > self.model_max_length: SCREAMING_SNAKE_CASE_ = input_ids[-self.model_max_length :] return input_ids
'''simple docstring''' import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any]=False ) -> Tuple: """simple docstring""" try: __UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __UpperCamelCase = default else: # KEY is set, convert it to True or False. try: __UpperCamelCase = strtobool(__lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value a__ : str =parse_flag_from_env('''RUN_SLOW''', default=False) a__ : Union[str, Any] =parse_flag_from_env('''RUN_REMOTE''', default=False) a__ : List[str] =parse_flag_from_env('''RUN_LOCAL''', default=True) a__ : Optional[int] =parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression a__ : Any =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') a__ : Optional[int] =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') a__ : List[str] =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio a__ : Any =pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam a__ : Tuple =pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility a__ : Union[str, Any] =pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows a__ : int =pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase__ ( __lowercase : Optional[Any] ) -> Optional[int]: """simple docstring""" try: import faiss # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires faiss' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Any: """simple docstring""" try: import regex # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires regex' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Tuple ) -> List[Any]: """simple docstring""" try: import elasticsearch # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires elasticsearch' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" try: import sqlalchemy # noqa except ImportError: __UpperCamelCase = unittest.skip('test requires sqlalchemy' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[str] ) -> List[str]: """simple docstring""" if not config.TORCH_AVAILABLE: __UpperCamelCase = unittest.skip('test requires PyTorch' )(__lowercase ) return test_case def 
lowercase__ ( __lowercase : Optional[Any] ) -> List[str]: """simple docstring""" if not config.TF_AVAILABLE: __UpperCamelCase = unittest.skip('test requires TensorFlow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : int ) -> Union[str, Any]: """simple docstring""" if not config.JAX_AVAILABLE: __UpperCamelCase = unittest.skip('test requires JAX' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> Optional[Any]: """simple docstring""" if not config.PIL_AVAILABLE: __UpperCamelCase = unittest.skip('test requires Pillow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Dict ) -> Any: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : int ) -> int: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> int: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : str ) -> Any: """simple docstring""" def _require_spacy_model(__lowercase : Any ): try: import spacy # noqa F401 spacy.load(__lowercase ) except ImportError: return unittest.skip('test requires spacy' )(__lowercase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(__lowercase ) )(__lowercase ) else: return test_case return _require_spacy_model def lowercase__ ( __lowercase : Union[str, Any] ) -> str: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Optional[Any]: """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(__lowercase ) else: return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: __UpperCamelCase = unittest.skip('test is slow' )(__lowercase ) return test_case def lowercase__ ( __lowercase : List[Any] ) -> List[str]: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: __UpperCamelCase = unittest.skip('test is local' )(__lowercase ) return test_case def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: __UpperCamelCase = unittest.skip('test is packaged' )(__lowercase ) return test_case def lowercase__ ( __lowercase : Optional[int] ) -> Any: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: __UpperCamelCase = unittest.skip('test requires remote' )(__lowercase ) return test_case def lowercase__ ( *__lowercase : Optional[Any] ) -> Tuple: """simple docstring""" def decorate(cls : int ): for name, fn in cls.__dict__.items(): if callable(__lowercase ) and name.startswith('test' ): for decorator in decorators: __UpperCamelCase = decorator(__lowercase ) setattr(cls , __lowercase , __lowercase ) return cls return decorate class snake_case ( __lowerCamelCase ): """simple docstring""" pass class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =0 SCREAMING_SNAKE_CASE_ : List[Any] =1 SCREAMING_SNAKE_CASE_ : Union[str, Any] =2 @contextmanager def lowercase__ ( 
__lowercase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , __lowercase : Dict=1e-16 ) -> List[Any]: """simple docstring""" __UpperCamelCase = requests.Session().request def timeout_request(__lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , **__lowercase : List[str] ): # Change the url to an invalid url so that the connection hangs __UpperCamelCase = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) __UpperCamelCase = timeout try: return online_request(__lowercase , __lowercase , **__lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __UpperCamelCase = url __UpperCamelCase = e.args[0] __UpperCamelCase = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]''' ),) __UpperCamelCase = (max_retry_error,) raise def raise_connection_error(__lowercase : int , __lowercase : List[str] , **__lowercase : Union[str, Any] ): raise requests.ConnectionError('Offline mode is enabled.' , request=__lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , __lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , __lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , __lowercase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def lowercase__ ( *__lowercase : Any , **__lowercase : Dict ) -> Dict: """simple docstring""" __UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__lowercase , **__lowercase ) as tmp_dir: try: os.chdir(__lowercase ) yield finally: os.chdir(__lowercase ) @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase__ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() __UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
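The offline-simulation helper above swaps `requests.Session.send` for a function that raises; a self-contained sketch of the `CONNECTION_FAILS` branch, assuming only requests and unittest.mock:

# Hedged sketch: patch requests so any network call raises, letting tests
# assert offline behavior deterministically.
from contextlib import contextmanager
from unittest.mock import patch

import requests

def _raise_connection_error(self, request, **kwargs):
    raise requests.ConnectionError('Offline mode is enabled.', request=request)

@contextmanager
def simulated_offline():
    with patch('requests.Session.send', _raise_connection_error):
        yield

with simulated_offline():
    try:
        requests.get('https://huggingface.co')
    except requests.ConnectionError as err:
        print('caught:', err)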
def lowercase__ ( __lowercase : List[str] , __lowercase : int ) -> Union[str, Any]: """simple docstring""" return deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() == deepcopy(__lowercase ).integers(0 , 100 , 10 ).tolist() def lowercase__ ( __lowercase : str ) -> List[str]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__lowercase : List[Any] , *__lowercase : Tuple , **__lowercase : Union[str, Any] ): try: return func(*__lowercase , **__lowercase ) except HTTPError as err: if str(__lowercase ).startswith('500' ) or str(__lowercase ).startswith('502' ): pytest.xfail(str(__lowercase ) ) raise err return decorator.decorator(_wrapper , __lowercase ) class snake_case : """simple docstring""" def __init__( self : int , __A : Any , __A : str , __A : List[Any] ): __UpperCamelCase = returncode __UpperCamelCase = stdout __UpperCamelCase = stderr async def lowercase__ ( __lowercase : Any , __lowercase : Optional[int] ) -> str: """simple docstring""" while True: __UpperCamelCase = await stream.readline() if line: callback(__lowercase ) else: break async def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None , __lowercase : Any=None , __lowercase : Optional[Any]=None , __lowercase : int=False , __lowercase : List[Any]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(__lowercase ) ) __UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__lowercase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowercase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __UpperCamelCase = [] __UpperCamelCase = [] def tee(__lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Tuple="" ): __UpperCamelCase = line.decode('utf-8' ).rstrip() sink.append(__lowercase ) if not quiet: print(__lowercase , __lowercase , file=__lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __lowercase : tee(__lowercase , __lowercase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda __lowercase : tee(__lowercase , __lowercase , sys.stderr , label='stderr:' ) ), ] , timeout=__lowercase , ) return _RunOutput(await p.wait() , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : int=180 , __lowercase : int=False , __lowercase : str=True ) -> _RunOutput: """simple docstring""" __UpperCamelCase = asyncio.get_event_loop() __UpperCamelCase = loop.run_until_complete( _stream_subprocess(__lowercase , env=__lowercase , stdin=__lowercase , timeout=__lowercase , quiet=__lowercase , echo=__lowercase ) ) __UpperCamelCase = ' '.join(__lowercase ) if result.returncode > 0: __UpperCamelCase = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCamelCase = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __UpperCamelCase = re.sub(R'^gw' , '' , __lowercase , 0 , re.M ) return int(__lowercase ) def lowercase__ ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = 29500 __UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
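The runner above streams a subprocess's stdout and stderr line by line while it executes. A compact sketch of the core asyncio mechanics, independent of the helper names above:

# Hedged sketch of line-by-line subprocess streaming with asyncio, the same
# mechanics as the stream-subprocess helper above.
import asyncio
import sys

async def run_and_stream(*cmd):
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )

    async def pump(stream, label):
        while True:
            line = await stream.readline()
            if not line:
                break
            print(label, line.decode().rstrip(), file=sys.stderr if label == 'err:' else sys.stdout)

    await asyncio.gather(pump(proc.stdout, 'out:'), pump(proc.stderr, 'err:'))
    return await proc.wait()

if __name__ == '__main__':
    code = asyncio.run(run_and_stream(sys.executable, '-c', "print('hello from child')"))
    print('return code:', code)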
'''simple docstring'''
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeqaSeq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadVaProcessor,
    SquadVaProcessor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
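These re-exports cover the standard collators. A short usage sketch for the most common one; the checkpoint name is illustrative:

# Hedged usage sketch: DataCollatorWithPadding pads a batch of variable-length
# encodings to a common length at batch-construction time.
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')  # illustrative checkpoint
collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors='pt')

features = [tokenizer(t) for t in ['short', 'a noticeably longer example sentence']]
batch = collator(features)
print(batch['input_ids'].shape)  # both rows padded to the longer length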
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''

print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())

try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)

try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
"""simple docstring""" import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> List[Any]: _lowerCAmelCase : Optional[int] = torch.exp(__lowercase ) _lowerCAmelCase : Union[str, Any] = torch.sum(__lowercase ,dim=1 ) # sum of exp(x_i) _lowerCAmelCase : Tuple = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i) return torch.log(__lowercase ) - B / A class __A ( nn.Module ): def __init__( self , a__ ): super().__init__() _lowerCAmelCase : Any = config.output_attentions _lowerCAmelCase : Any = config.output_hidden_states _lowerCAmelCase : Union[str, Any] = nn.ModuleList([BertLayer(__A ) for _ in range(config.num_hidden_layers )] ) _lowerCAmelCase : Union[str, Any] = nn.ModuleList([BertHighway(__A ) for _ in range(config.num_hidden_layers )] ) _lowerCAmelCase : Any = [-1 for _ in range(config.num_hidden_layers )] def __A ( self , a__ ): if (type(__A ) is float) or (type(__A ) is int): for i in range(len(self.early_exit_entropy ) ): _lowerCAmelCase : Union[str, Any] = x else: _lowerCAmelCase : List[Any] = x def __A ( self , a__ ): _lowerCAmelCase : Union[str, Any] = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ): _lowerCAmelCase : Any = () _lowerCAmelCase : Tuple = () _lowerCAmelCase : List[Any] = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: _lowerCAmelCase : Any = all_hidden_states + (hidden_states,) _lowerCAmelCase : Tuple = layer_module( __A , __A , head_mask[i] , __A , __A ) _lowerCAmelCase : Optional[int] = layer_outputs[0] if self.output_attentions: _lowerCAmelCase : Union[str, Any] = all_attentions + (layer_outputs[1],) _lowerCAmelCase : Tuple = (hidden_states,) if self.output_hidden_states: _lowerCAmelCase : List[Any] = current_outputs + (all_hidden_states,) if self.output_attentions: _lowerCAmelCase : str = current_outputs + (all_attentions,) _lowerCAmelCase : Tuple = self.highway[i](__A ) # logits, pooled_output if not self.training: _lowerCAmelCase : Union[str, Any] = highway_exit[0] _lowerCAmelCase : int = entropy(__A ) _lowerCAmelCase : Optional[int] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy _lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: _lowerCAmelCase : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(__A , i + 1 ) else: _lowerCAmelCase : str = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: _lowerCAmelCase : Optional[int] = all_hidden_states + (hidden_states,) _lowerCAmelCase : Optional[int] = (hidden_states,) if self.output_hidden_states: _lowerCAmelCase : List[str] = outputs + (all_hidden_states,) if self.output_attentions: _lowerCAmelCase : List[Any] = outputs + (all_attentions,) _lowerCAmelCase : Dict = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( "The Bert Model transformer with early exiting (DeeBERT). 
" , __lowerCamelCase , ) class __A ( __lowerCamelCase ): def __init__( self , a__ ): super().__init__(__A ) _lowerCAmelCase : str = config _lowerCAmelCase : Tuple = BertEmbeddings(__A ) _lowerCAmelCase : Optional[Any] = DeeBertEncoder(__A ) _lowerCAmelCase : Optional[int] = BertPooler(__A ) self.init_weights() def __A ( self ): self.encoder.init_highway_pooler(self.pooler ) def __A ( self ): return self.embeddings.word_embeddings def __A ( self , a__ ): _lowerCAmelCase : Dict = value def __A ( self , a__ ): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(__A ) @add_start_docstrings_to_model_forward(__A ) def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ): if input_ids is not None and inputs_embeds is not None: raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" ) elif input_ids is not None: _lowerCAmelCase : str = input_ids.size() elif inputs_embeds is not None: _lowerCAmelCase : str = inputs_embeds.size()[:-1] else: raise ValueError("""You have to specify either input_ids or inputs_embeds""" ) _lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: _lowerCAmelCase : Dict = torch.ones(__A , device=__A ) if encoder_attention_mask is None: _lowerCAmelCase : List[str] = torch.ones(__A , device=__A ) if token_type_ids is None: _lowerCAmelCase : List[str] = torch.zeros(__A , dtype=torch.long , device=__A ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. _lowerCAmelCase : Dict = self.get_extended_attention_mask(__A , __A , __A ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: _lowerCAmelCase : Any = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: _lowerCAmelCase : List[str] = encoder_attention_mask[:, None, None, :] _lowerCAmelCase : List[str] = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility _lowerCAmelCase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -10000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] _lowerCAmelCase : Optional[Any] = self.get_head_mask(__A , self.config.num_hidden_layers ) _lowerCAmelCase : List[str] = self.embeddings( input_ids=__A , position_ids=__A , token_type_ids=__A , inputs_embeds=__A ) _lowerCAmelCase : Optional[Any] = self.encoder( __A , attention_mask=__A , head_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , ) _lowerCAmelCase : int = encoder_outputs[0] _lowerCAmelCase : Optional[int] = self.pooler(__A ) _lowerCAmelCase : Tuple = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class __A ( __lowerCamelCase ): def __init__( self , a__ , a__ ): _lowerCAmelCase : List[str] = message _lowerCAmelCase : Optional[int] = exit_layer # start from 1! 
class __A ( nn.Module ): def __init__( self , a__ ): super().__init__() _lowerCAmelCase : List[Any] = BertPooler(__A ) _lowerCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob ) _lowerCAmelCase : Any = nn.Linear(config.hidden_size , config.num_labels ) def __A ( self , a__ ): # Pooler _lowerCAmelCase : Tuple = encoder_outputs[0] _lowerCAmelCase : Optional[int] = self.pooler(__A ) # "return" pooler_output # BertModel _lowerCAmelCase : Optional[Any] = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification _lowerCAmelCase : str = bmodel_output[1] _lowerCAmelCase : List[str] = self.dropout(__A ) _lowerCAmelCase : Dict = self.classifier(__A ) return logits, pooled_output @add_start_docstrings( "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , __lowerCamelCase , ) class __A ( __lowerCamelCase ): def __init__( self , a__ ): super().__init__(__A ) _lowerCAmelCase : Dict = config.num_labels _lowerCAmelCase : Tuple = config.num_hidden_layers _lowerCAmelCase : Tuple = DeeBertModel(__A ) _lowerCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob ) _lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(__A ) def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ): _lowerCAmelCase : Dict = self.num_layers try: _lowerCAmelCase : Tuple = self.bert( __A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits _lowerCAmelCase : int = outputs[1] _lowerCAmelCase : Any = self.dropout(__A ) _lowerCAmelCase : int = self.classifier(__A ) _lowerCAmelCase : Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCAmelCase : Optional[Any] = e.message _lowerCAmelCase : List[str] = e.exit_layer _lowerCAmelCase : Dict = outputs[0] if not self.training: _lowerCAmelCase : List[str] = entropy(__A ) _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : List[str] = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Tuple = MSELoss() _lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _lowerCAmelCase : List[Any] = CrossEntropyLoss() _lowerCAmelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _lowerCAmelCase : int = [] for highway_exit in outputs[-1]: _lowerCAmelCase : int = highway_exit[0] if not self.training: highway_logits_all.append(__A ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Union[str, Any] = MSELoss() _lowerCAmelCase : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _lowerCAmelCase : str = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(__A ) if train_highway: _lowerCAmelCase : int = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _lowerCAmelCase : int = (loss,) + outputs if not self.training: _lowerCAmelCase : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _lowerCAmelCase : Tuple = ( (outputs[0],) + 
(highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
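Early exit above hinges on the entropy of a highway classifier's logits: if an intermediate layer is already confident, inference stops there. A minimal sketch of that decision, using the same entropy identity as the function at the top of the file (H = log A - B/A with A = sum exp(x_i), B = sum x_i exp(x_i)); the threshold value is illustrative:

# Hedged sketch of entropy-based early exit as in DeeBERT: if the softmax
# entropy of an intermediate classifier's logits is below a threshold,
# return its prediction instead of running the remaining layers.
import torch

def softmax_entropy(logits):
    exp_x = torch.exp(logits)
    a = torch.sum(exp_x, dim=1)           # A = sum_i exp(x_i)
    b = torch.sum(logits * exp_x, dim=1)  # B = sum_i x_i * exp(x_i)
    return torch.log(a) - b / a           # H = log A - B / A

threshold = 0.3  # illustrative; tuned per layer in practice
highway_logits = torch.tensor([[4.0, -1.0, -2.0]])  # a confident intermediate prediction
if softmax_entropy(highway_logits).item() < threshold:
    print('early exit with class', highway_logits.argmax(dim=1).item())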
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple: """simple docstring""" return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :] def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]: """simple docstring""" __UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] ) __UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] ) __UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] ) __UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] ) __UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]: """simple docstring""" if split_mlp_wi: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :] __UpperCamelCase = (wi_a, wi_a) else: __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :] __UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :] return wi, wo def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str: """simple docstring""" return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i] def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = traverse_util.flatten_dict(variables['target'] ) __UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old print('Split MLP:' , __lowercase ) __UpperCamelCase = collections.OrderedDict() # Shared embeddings. __UpperCamelCase = old['token_embedder/embedding'] # Encoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (MLP). 
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , __lowercase , 'encoder' ).T __UpperCamelCase = old['encoder/encoder_norm/scale'] if not scalable_attention: __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'encoder' ).T __UpperCamelCase = tax_relpos_bias_lookup( __lowercase , 0 , 'decoder' ).T if not is_encoder_only: # Decoder. for i in range(__lowercase ): # Block i, layer 0 (Self Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 1 (Cross Attention). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' ) __UpperCamelCase = layer_norm __UpperCamelCase = k.T __UpperCamelCase = o.T __UpperCamelCase = q.T __UpperCamelCase = v.T # Block i, layer 2 (MLP). __UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' ) __UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase ) __UpperCamelCase = layer_norm if split_mlp_wi: __UpperCamelCase = wi[0].T __UpperCamelCase = wi[1].T else: __UpperCamelCase = wi.T __UpperCamelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T __UpperCamelCase = old['decoder/decoder_norm/scale'] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCamelCase = old['decoder/logits_dense/kernel'].T return new def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int: """simple docstring""" __UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCamelCase = state_dict['shared.weight'] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('Using shared word embeddings as lm_head.' 
) __UpperCamelCase = state_dict['shared.weight'] return state_dict def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase ) __UpperCamelCase = convert_tax_to_pytorch( __lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase ) __UpperCamelCase = make_state_dict(__lowercase , __lowercase ) model.load_state_dict(__lowercase , strict=__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]: """simple docstring""" __UpperCamelCase = MTaConfig.from_json_file(__lowercase ) print(F'''Building PyTorch model from configuration: {config}''' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCamelCase = UMTaEncoderModel(__lowercase ) else: __UpperCamelCase = UMTaForConditionalGeneration(__lowercase ) # Load weights from tf checkpoint load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__lowercase ) # Verify that we can load the checkpoint. model.from_pretrained(__lowercase ) print('Done' ) if __name__ == "__main__": a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) a__ : List[str] =parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
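The argparse block above defines the full CLI. A representative invocation, where the script filename and every path are placeholders:

# Hedged example invocation; the script filename and all paths are placeholders.
python convert_t5x_checkpoint_to_pytorch.py \
    --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
    --config_file /path/to/config.json \
    --pytorch_dump_path /path/to/pytorch_model \
    --scalable_attention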
"""simple docstring""" import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A: int = logging.get_logger(__name__) A: List[Any] = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''} A: int = { '''vocab_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''', }, '''emoji_file''': { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''', }, } A: Dict = { '''abeja/gpt-neox-japanese-2.7b''': 2_0_4_8, } def _snake_case ( UpperCamelCase : Tuple , UpperCamelCase : Dict ): with open(__lowercase , """r""" , encoding="""utf-8""" ) as f: UpperCAmelCase : List[str] = json.loads(f.read() ) UpperCAmelCase : List[Any] = collections.OrderedDict() UpperCAmelCase : Tuple = collections.OrderedDict() UpperCAmelCase : Any = collections.OrderedDict() with open(__lowercase , """r""" , encoding="""utf-8""" ) as f: UpperCAmelCase : Optional[int] = f.readlines() UpperCAmelCase : Dict = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(__lowercase ): UpperCAmelCase : Dict = b UpperCAmelCase : Optional[int] = idx for wd in b: UpperCAmelCase : Optional[Any] = idx return vocab, raw_vocab, ids_to_tokens, emoji class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ): __lowerCAmelCase : Any = VOCAB_FILES_NAMES __lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase : Dict = ["input_ids", "attention_mask"] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE="<|startoftext|>" , _SCREAMING_SNAKE_CASE="<|endoftext|>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: '''simple docstring''' super().__init__( unk_token=__A , pad_token=__A , bos_token=__A , eos_token=__A , do_clean_text=__A , **__A , ) if not os.path.isfile(__A ): raise ValueError( F"Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained" """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) if not os.path.isfile(__A ): raise ValueError( F"Can\'t find a emoji file at path \'{emoji_file}\'. 
To load the emoji information from a Google" """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) UpperCAmelCase : Tuple = do_clean_text UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = load_vocab_and_emoji(__A , __A ) UpperCAmelCase : List[Any] = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' return len(self.raw_vocab ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' return self.subword_tokenizer.tokenize(__A , clean=self.do_clean_text ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' return self.vocab.get(__A , self.vocab.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(__A ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Any = """""".join(__A ).strip() return out_string def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' UpperCAmelCase : Any = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] ) if len(__A ) > self.model_max_length: UpperCAmelCase : List[str] = input_ids[-self.model_max_length :] return input_ids def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[int] = 0 if os.path.isdir(__A ): UpperCAmelCase : Union[str, Any] = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Dict = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] ) else: UpperCAmelCase : Optional[Any] = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Any = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""] ) with open(__A , """w""" , encoding="""utf-8""" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." 
""" Please check that the vocabulary is not corrupted!""" ) UpperCAmelCase : Any = token_index writer.write(""",""".join(__A ) + """\n""" ) index += 1 with open(__A , """w""" , encoding="""utf-8""" ) as writer: json.dump(self.emoji , __A ) return vocab_file, emoji_file class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' UpperCAmelCase : Union[str, Any] = vocab # same as swe UpperCAmelCase : Any = ids_to_tokens # same as bpe UpperCAmelCase : Optional[int] = emoji UpperCAmelCase : Any = np.max([len(__A ) for w in self.vocab.keys()] ) UpperCAmelCase : Any = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" ) UpperCAmelCase : List[str] = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" ) UpperCAmelCase : List[Any] = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" ) UpperCAmelCase : List[Any] = re.compile( r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) UpperCAmelCase : Optional[Any] = re.compile( r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) UpperCAmelCase : str = re.compile( r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" ) UpperCAmelCase : Union[str, Any] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿""" UpperCAmelCase : Union[str, Any] = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟""" UpperCAmelCase : Any = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} ) def __len__( self ) -> str: '''simple docstring''' return len(self.ids_to_tokens ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' UpperCAmelCase : Any = self.content_repattera.sub("""<URL>""" , __A ) UpperCAmelCase : Union[str, Any] = self.content_repattera.sub("""<EMAIL>""" , __A ) UpperCAmelCase : Optional[int] = self.content_repattera.sub("""<TEL>""" , __A ) UpperCAmelCase : Dict = self.content_repattera.sub("""<DATE>""" , __A ) UpperCAmelCase : Optional[int] = self.content_repattera.sub("""<DATE>""" , __A ) UpperCAmelCase : int = self.content_repattera.sub("""<PRICE>""" , __A ) UpperCAmelCase : List[Any] = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: UpperCAmelCase : List[str] = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" ) return content def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Tuple = text.replace(""" """ , """<SP>""" ) UpperCAmelCase : Dict = text.replace(""" """ , """<SP>""" ) UpperCAmelCase : int = text.replace("""\r\n""" , """<BR>""" ) UpperCAmelCase : Dict = text.replace("""\n""" , """<BR>""" ) UpperCAmelCase : List[str] = text.replace("""\r""" , """<BR>""" ) UpperCAmelCase : int = text.replace("""\t""" , """<TAB>""" ) UpperCAmelCase : int = text.replace("""—""" , """ー""" ) UpperCAmelCase : Tuple = text.replace("""−""" , """ー""" ) for k, v in self.emoji["emoji"].items(): if k in text: UpperCAmelCase : List[Any] = text.replace(__A , __A 
) if clean: UpperCAmelCase : Union[str, Any] = self.clean_text(__A ) def check_simbol(_SCREAMING_SNAKE_CASE ): UpperCAmelCase : str = x.encode() if len(__A ) == 1 and len(__A ) == 2: UpperCAmelCase : str = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xC_2_A_1 and c <= 0xC_2_B_F) or (c >= 0xC_7_8_0 and c <= 0xC_7_8_3) or (c >= 0xC_A_B_9 and c <= 0xC_B_B_F) or (c >= 0xC_C_8_0 and c <= 0xC_D_A_2) ): return True return False def checkuae(_SCREAMING_SNAKE_CASE ): UpperCAmelCase : Optional[Any] = x.encode() if len(__A ) == 1 and len(__A ) == 3: UpperCAmelCase : List[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xE_2_8_0_8_0 and c <= 0xE_2_B_0_7_F: return True return False UpperCAmelCase : Any = 0 UpperCAmelCase : str = [] while pos < len(__A ): UpperCAmelCase : Optional[int] = min(len(__A ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3 UpperCAmelCase : str = [] # (token_id, token, pos) for e in range(__A , __A , -1 ): UpperCAmelCase : Optional[Any] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__A ) > 2: UpperCAmelCase : int = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__A ) > 0: # the smallest token_id is adopted UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = sorted(__A , key=lambda _SCREAMING_SNAKE_CASE : x[0] )[0] result.append(__A ) UpperCAmelCase : Any = e else: UpperCAmelCase : List[Any] = pos + 1 UpperCAmelCase : List[Any] = text[pos:end] if check_simbol(__A ): result.append("""<KIGOU>""" ) elif checkuae(__A ): result.append("""<U2000U2BFF>""" ) else: for i in wd.encode("""utf-8""" ): result.append("""<|byte%d|>""" % i ) UpperCAmelCase : Union[str, Any] = end return result def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="\n" ) -> int: '''simple docstring''' UpperCAmelCase : Any = [] UpperCAmelCase : Optional[int] = [] UpperCAmelCase : List[Any] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__A ) > 0: words.append(bytearray(__A ).decode("""utf-8""" , errors="""replace""" ) ) UpperCAmelCase : Dict = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word] ) elif word == "<SP>": words.append(""" """ ) elif word == "<BR>": words.append(__A ) elif word == "<TAB>": words.append("""\t""" ) elif word == "<BLOCK>": words.append("""▀""" ) elif word == "<KIGOU>": words.append("""ǀ""" ) elif word == "<U2000U2BFF>": words.append("""‖""" ) else: words.append(__A ) if len(__A ) > 0: words.append(bytearray(__A ).decode("""utf-8""" , errors="""replace""" ) ) UpperCAmelCase : List[Any] = """""".join(__A ) return text
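A short usage sketch for the tokenizer above; the checkpoint name appears in the pretrained maps earlier in the file, and network access is assumed:

# Hedged usage sketch: round-trip Japanese text through the sub-word + emoji vocab.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('abeja/gpt-neox-japanese-2.7b')
ids = tokenizer('こんにちは、世界')['input_ids']
print(ids)
print(tokenizer.decode(ids))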
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : List[Any] ="BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Optional[int] =("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , __A : Optional[int] , __A : List[Any] ): __UpperCamelCase = False super().__init__(__A , __A ) __UpperCamelCase = self.image_processor def __call__( self : List[Any] , __A : ImageInput = None , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ): if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: __UpperCamelCase = self.tokenizer __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding # add pixel_values __UpperCamelCase = self.image_processor(__A , return_tensors=__A ) if text is not None: __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) else: __UpperCamelCase = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def _lowerCamelCase ( self : List[Any] , *__A : Dict , **__A : Optional[int] ): return self.tokenizer.batch_decode(*__A , **__A ) def _lowerCamelCase ( self : List[Any] , *__A : List[str] , **__A : Dict ): return self.tokenizer.decode(*__A , **__A ) @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.tokenizer.model_input_names __UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
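The processor above routes images to the image processor and text to the tokenizer and merges the outputs. A hedged usage sketch; the checkpoint and image URL are illustrative:

# Hedged usage sketch of the combined BLIP processor.
import requests
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, text='a photography of', return_tensors='pt')
print(sorted(inputs.keys()))  # pixel_values plus the tokenizer outputs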
import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowerCamelCase__ ( A__ : Any ): '''simple docstring''' __lowerCamelCase = [] embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', f'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', f'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', f'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', f'stage{idx}.patch_embed.norm.bias', ) ) return embed def lowerCamelCase__ ( A__ : Tuple , A__ : Dict ): '''simple docstring''' __lowerCamelCase = [] attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( 
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', f'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', f'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', f'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', f'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', f'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', f'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', f'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', f'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') ) 
attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def lowerCamelCase__ ( A__ : List[Any] ): '''simple docstring''' __lowerCamelCase = [] token.append((f'cvt.encoder.stages.{idx}.cls_token', """stage2.cls_token""") ) return token def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = [] head.append(("""layernorm.weight""", """norm.weight""") ) head.append(("""layernorm.bias""", """norm.bias""") ) head.append(("""classifier.weight""", """head.weight""") ) head.append(("""classifier.bias""", """head.bias""") ) return head def lowerCamelCase__ ( A__ : str , A__ : Tuple , A__ : Any , A__ : Any ): '''simple docstring''' __lowerCamelCase = """imagenet-1k-id2label.json""" __lowerCamelCase = 1000 __lowerCamelCase = """huggingface/label-files""" __lowerCamelCase = num_labels __lowerCamelCase = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type="""dataset""" ) ) , """r""" ) ) __lowerCamelCase = {int(__lowercase ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = __lowerCamelCase = CvtConfig(num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13": __lowerCamelCase = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21": __lowerCamelCase = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __lowerCamelCase = [2, 2, 20] __lowerCamelCase = [3, 12, 16] __lowerCamelCase = [192, 768, 1024] __lowerCamelCase = CvtForImageClassification(__lowercase ) __lowerCamelCase = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" ) __lowerCamelCase = image_size __lowerCamelCase = torch.load(__lowercase , map_location=torch.device("""cpu""" ) ) __lowerCamelCase = OrderedDict() __lowerCamelCase = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __lowerCamelCase = list_of_state_dict + cls_token(__lowercase ) __lowerCamelCase = list_of_state_dict + embeddings(__lowercase ) for cnt in range(config.depth[idx] ): __lowerCamelCase = list_of_state_dict + attention(__lowercase , __lowercase ) __lowerCamelCase = list_of_state_dict + final() for gg in list_of_state_dict: print(__lowercase ) for i in range(len(__lowercase ) ): __lowerCamelCase = original_weights[list_of_state_dict[i][1]] model.load_state_dict(__lowercase ) model.save_pretrained(__lowercase ) image_processor.save_pretrained(__lowercase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=384, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) UpperCAmelCase_ = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
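# ----------------------------------------------------------------------
# Editor's sketch (not part of the original file): the conversion script
# above is an instance of a common checkpoint-porting pattern -- build a
# list of (new_name, old_name) pairs, then copy tensors across.  A
# self-contained toy version with hypothetical tensor names:
# ----------------------------------------------------------------------
from collections import OrderedDict

import torch

old_state_dict = {"stage0.patch_embed.proj.weight": torch.randn(64, 3, 7, 7)}
rename_pairs = [
    (
        "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
        "stage0.patch_embed.proj.weight",
    ),
]
# Copy each original tensor under its new name, preserving insertion order.
new_state_dict = OrderedDict(
    (new_name, old_state_dict[old_name]) for new_name, old_name in rename_pairs
)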
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node: Node | None = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """
        A loop is present if, while traversing the list, any node is visited twice.
        """
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
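# ----------------------------------------------------------------------
# Editor's sketch (not part of the original file): the class above keeps a
# `visited` list, costing O(n) extra memory and O(n^2) comparisons.
# Floyd's tortoise-and-hare detection, shown here against the same `Node`
# class, finds a loop in O(n) time and O(1) space.
# ----------------------------------------------------------------------
def has_loop_floyd(head: "Node | None") -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False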
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __UpperCamelCase : Dict = logging.get_logger(__name__) __UpperCamelCase : Optional[int] = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class lowercase__ ( __lowerCamelCase): UpperCamelCase_ = "perceiver" def __init__( self : Any , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Tuple=1280 , UpperCamelCase__ : Dict=768 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : str=26 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Any=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]="kv" , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : int=1E-12 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=262 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : List[str]=56 , UpperCamelCase__ : Optional[int]=[368, 496] , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : Optional[int]=1920 , UpperCamelCase__ : Optional[int]=16 , UpperCamelCase__ : Dict=[1, 16, 224, 224] , **UpperCamelCase__ : Union[str, Any] , ): '''simple docstring''' super().__init__(**__A ) SCREAMING_SNAKE_CASE : str = num_latents SCREAMING_SNAKE_CASE : Optional[Any] = d_latents SCREAMING_SNAKE_CASE : Optional[Any] = d_model SCREAMING_SNAKE_CASE : Any = num_blocks SCREAMING_SNAKE_CASE : Dict = num_self_attends_per_block SCREAMING_SNAKE_CASE : Optional[Any] = num_self_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = num_cross_attention_heads SCREAMING_SNAKE_CASE : int = qk_channels SCREAMING_SNAKE_CASE : List[str] = v_channels SCREAMING_SNAKE_CASE : Union[str, Any] = cross_attention_shape_for_attention SCREAMING_SNAKE_CASE : List[Any] = self_attention_widening_factor SCREAMING_SNAKE_CASE : List[str] = cross_attention_widening_factor SCREAMING_SNAKE_CASE : Dict = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : int = initializer_range SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE : Tuple = use_query_residual # masked language modeling attributes SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings # image classification attributes SCREAMING_SNAKE_CASE : Optional[Any] = image_size # flow attributes SCREAMING_SNAKE_CASE : Any = train_size # multimodal autoencoding attributes SCREAMING_SNAKE_CASE : int = num_frames SCREAMING_SNAKE_CASE : List[str] = audio_samples_per_frame SCREAMING_SNAKE_CASE : Optional[Any] = samples_per_patch SCREAMING_SNAKE_CASE : List[Any] = output_shape class lowercase__ ( __lowerCamelCase): @property def __A ( self : Union[str, Any] ): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), 
('''attention_mask''', dynamic_axis), ] ) @property def __A ( self : Optional[Any] ): '''simple docstring''' return 1E-4 def __A ( self : Any , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ): '''simple docstring''' if isinstance(__A , __A ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE : Optional[Any] = compute_effective_axis_dimension( __A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE : List[str] = preprocessor.num_special_tokens_to_add(__A ) SCREAMING_SNAKE_CASE : Union[str, Any] = compute_effective_axis_dimension( __A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A ) # Generate dummy inputs according to compute batch and sequence SCREAMING_SNAKE_CASE : Any = [''' '''.join(['''a'''] ) * seq_length] * batch_size SCREAMING_SNAKE_CASE : Any = dict(preprocessor(__A , return_tensors=__A ) ) SCREAMING_SNAKE_CASE : Optional[Any] = inputs.pop('''input_ids''' ) return inputs elif isinstance(__A , __A ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(__A , fixed_dimension=OnnxConfig.default_fixed_batch ) SCREAMING_SNAKE_CASE : int = self._generate_dummy_images(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE : Dict = dict(preprocessor(images=__A , return_tensors=__A ) ) SCREAMING_SNAKE_CASE : List[Any] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp algorithm searches for ``pattern`` inside ``text`` using a
    rolling hash, so each window of the text is rehashed in O(1); the actual
    string comparison only runs when the hashes match.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
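# ----------------------------------------------------------------------
# Editor's note (not part of the original file): a quick numeric check of
# the rolling-hash identity used above.  Dropping the leading character and
# appending the next one must give the same value as hashing the new window
# from scratch.  All names here are local to this sketch.
# ----------------------------------------------------------------------
def _hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


window = 3
power = pow(alphabet_size, window - 1, modulus)  # alphabet_size**(window-1) mod modulus
old = _hash("abc")
rolled = ((old - ord("a") * power) * alphabet_size + ord("d")) % modulus
assert rolled == _hash("bcd")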
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> List[Any]: __UpperCamelCase =0 @slow def _a ( self ) -> Optional[Any]: for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(__A ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(__A ) , 0 ) def _a ( self ) -> Any: __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a ( self ) -> Any: __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def _a ( self ) -> str: __UpperCamelCase =AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) # Check that tokenizer_type ≠ model_type __UpperCamelCase =AutoTokenizer.from_pretrained(__A , config=__A ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def _a ( self ) -> Any: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' , use_fast=__A ) self.assertIsInstance(__A , __A ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' , use_fast=__A ) self.assertIsInstance(__A , __A ) @require_tokenizers def _a ( self ) -> str: with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__A , 'vocab.txt' ) ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A , tokenizer_type='bert' ) self.assertIsInstance(__A , __A ) with tempfile.TemporaryDirectory() as tmp_dir: 
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__A , 'vocab.json' ) ) shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__A , 'merges.txt' ) ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A , tokenizer_type='gpt2' ) self.assertIsInstance(__A , __A ) def _a ( self ) -> Any: with pytest.raises(__A ): AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' ) @require_tokenizers def _a ( self ) -> List[str]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: __UpperCamelCase =tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) if isinstance(__A , __A ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __A ) else: self.assertEqual(tokenizer.do_lower_case , __A ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def _a ( self ) -> Optional[Any]: for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( __A , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ): __UpperCamelCase =tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' ) def _a ( self ) -> Any: # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai __UpperCamelCase =TOKENIZER_MAPPING.values() __UpperCamelCase =[] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(__A ) @require_tokenizers def _a ( self ) -> Any: self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__A ) , __A ) self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , __A ) @require_tokenizers def _a ( self ) -> Any: __UpperCamelCase =AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=__A ) __UpperCamelCase ='Hello, world. How are you?' __UpperCamelCase =tokenizer.tokenize(__A ) self.assertEqual('[UNK]' , tokens[0] ) __UpperCamelCase =AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=__A ) __UpperCamelCase =tokenizer.tokenize(__A ) self.assertEqual('[UNK]' , tokens[0] ) @require_tokenizers def _a ( self ) -> str: __UpperCamelCase =AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' ) self.assertEqual(type(__A ) , __A ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 30000 ) self.assertEqual(tokenizer.unk_token , '[UNK]' ) self.assertEqual(tokenizer.padding_side , 'right' ) self.assertEqual(tokenizer.truncation_side , 'right' ) def _a ( self ) -> int: __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def _a ( self ) -> List[str]: __UpperCamelCase =AutoTokenizer.from_pretrained('ctrl' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(__A , __A ) def _a ( self ) -> Union[str, Any]: # Check we can load the tokenizer config of an online model. 
__UpperCamelCase =get_tokenizer_config('bert-base-cased' ) __UpperCamelCase =config.pop('_commit_hash' , __A ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(__A , {'do_lower_case': False} ) # This model does not have a tokenizer_config so we get back an empty dict. __UpperCamelCase =get_tokenizer_config(__A ) self.assertDictEqual(__A , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase =get_tokenizer_config(__A ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' ) def _a ( self ) -> str: try: AutoConfig.register('custom' , __A ) AutoTokenizer.register(__A , slow_tokenizer_class=__A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoTokenizer.register(__A , slow_tokenizer_class=__A ) __UpperCamelCase =CustomTokenizer.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def _a ( self ) -> int: try: AutoConfig.register('custom' , __A ) # Can register in two steps AutoTokenizer.register(__A , slow_tokenizer_class=__A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(__A , fast_tokenizer_class=__A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( __A , slow_tokenizer_class=__A , fast_tokenizer_class=__A ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoTokenizer.register(__A , fast_tokenizer_class=__A ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: __UpperCamelCase =BertTokenizerFast.from_pretrained(__A ) bert_tokenizer.save_pretrained(__A ) __UpperCamelCase =CustomTokenizerFast.from_pretrained(__A ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A ) self.assertIsInstance(__A , __A ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A , use_fast=__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a ( self ) -> Any: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): __UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(__A ): __UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) __UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A , trust_remote_code=__A ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version __UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__A ) __UpperCamelCase =AutoTokenizer.from_pretrained(__A , trust_remote_code=__A , use_fast=__A ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' ) @require_tokenizers def _a ( self ) -> Tuple: class UpperCAmelCase__ ( __lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Any = False class UpperCAmelCase__ ( __lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : str = NewTokenizer UpperCAmelCase__ : str = False try: AutoConfig.register('custom' , __A ) AutoTokenizer.register(__A , slow_tokenizer_class=__A ) AutoTokenizer.register(__A , fast_tokenizer_class=__A ) # If remote code is not set, the default is to use local __UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) __UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
__UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertFalse(tokenizer.special_attribute_present ) __UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub __UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) self.assertTrue(tokenizer.special_attribute_present ) __UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=__A , use_fast=__A ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def _a ( self ) -> str: __UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' ) # Test we can also load the slow version __UpperCamelCase =AutoTokenizer.from_pretrained( 'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=__A , use_fast=__A ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) else: self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' ) def _a ( self ) -> Dict: with self.assertRaisesRegex( __A , 'bert-base is not a local folder and is not a valid model identifier' ): __UpperCamelCase =AutoTokenizer.from_pretrained('bert-base' ) def _a ( self ) -> Optional[Any]: with self.assertRaisesRegex( __A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): __UpperCamelCase =AutoTokenizer.from_pretrained(__A , revision='aaaaaa' ) def _a ( self ) -> Any: # Make sure we have cached the tokenizer. __UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: __UpperCamelCase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
'''simple docstring''' from __future__ import annotations class snake_case : """simple docstring""" def __init__( self : Optional[int] , __A : list[list[int]] ): __UpperCamelCase = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(__A ) != 0: __UpperCamelCase = len(rows[0] ) if cols == 0: raise error for row in rows: if len(__A ) != cols: raise error for value in row: if not isinstance(__A , (int, float) ): raise error __UpperCamelCase = rows else: __UpperCamelCase = [] def _lowerCamelCase ( self : int ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowerCamelCase ( self : str ): return len(self.rows ) @property def _lowerCamelCase ( self : Any ): return len(self.rows[0] ) @property def _lowerCamelCase ( self : Optional[Any] ): return (self.num_rows, self.num_columns) @property def _lowerCamelCase ( self : Dict ): return self.order[0] == self.order[1] def _lowerCamelCase ( self : Any ): __UpperCamelCase = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Any ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowerCamelCase ( self : List[str] ): return bool(self.determinant() ) def _lowerCamelCase ( self : Dict , __A : int , __A : int ): __UpperCamelCase = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(__A ).determinant() def _lowerCamelCase ( self : Dict , __A : int , __A : int ): if (row + column) % 2 == 0: return self.get_minor(__A , __A ) return -1 * self.get_minor(__A , __A ) def _lowerCamelCase ( self : List[str] ): return Matrix( [ [self.get_minor(__A , __A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowerCamelCase ( self : Union[str, Any] ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(__A ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self : Optional[Any] ): return str(self.rows ) def __str__( self : Union[str, Any] ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(__A ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def _lowerCamelCase ( self : List[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in row: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(__A ) else: __UpperCamelCase = self.rows[0:position] + [row] + self.rows[position:] def _lowerCamelCase ( self : Optional[Any] , __A : list[int] , __A : int | None = None ): __UpperCamelCase = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(__A , __A ): raise type_error for value in column: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: __UpperCamelCase = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: __UpperCamelCase = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : Tuple , __A : object ): if not isinstance(__A , __A ): return NotImplemented return self.rows == other.rows def __ne__( self : Any , __A : object ): return not self == other def __neg__( self : List[Any] ): return self * -1 def __add__( self : List[str] , __A : Matrix ): if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : str , __A : Matrix ): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self : str , __A : Matrix | int | float ): if isinstance(__A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(__A , __A ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(__A , __A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self : Union[str, Any] , __A : int ): if not isinstance(__A , __A ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) __UpperCamelCase = self for _ in range(other - 1 ): result *= self return result @classmethod def _lowerCamelCase ( cls : Tuple , __A : list[int] , __A : list[int] ): return sum(row[i] * column[i] for i in range(len(__A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
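# ----------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): the class above is
# referred to internally as `Matrix`; assuming that name, a short demo of
# the overloaded operators:
# ----------------------------------------------------------------------
# a = Matrix([[1, 2], [3, 4]])
# b = Matrix([[5, 6], [7, 8]])
# print(a + b)            # [[6, 8], [10, 12]]
# print(a * b)            # [[19, 22], [43, 50]]
# print(a.determinant())  # 1*4 - 2*3 = -2
# print(a ** 2)           # [[7, 10], [15, 22]]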
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in the 1000-digit number n that have
    the greatest product and return that product.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
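# ----------------------------------------------------------------------
# Editor's sketch (not part of the original file): the reduce-over-strings
# trick above exists mainly to satisfy mypy; a more direct formulation of
# the same 13-digit sliding-window product uses math.prod.
# ----------------------------------------------------------------------
import math


def solution_prod(n: str = N) -> int:
    digits = [int(c) for c in n]
    return max(math.prod(digits[i : i + 13]) for i in range(len(digits) - 12))


assert solution_prod() == solution()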
'''simple docstring''' import os import numpy import onnx def lowercase__ ( __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> Dict: """simple docstring""" __UpperCamelCase = a.name __UpperCamelCase = b.name __UpperCamelCase = '' __UpperCamelCase = '' __UpperCamelCase = a == b __UpperCamelCase = name_a __UpperCamelCase = name_b return res def lowercase__ ( __lowercase : int , __lowercase : int , __lowercase : List[Any] ) -> Optional[int]: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(__lowercase , __lowercase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) _graph_replace_input_with(node_proto.attribute[1].g , __lowercase , __lowercase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , __lowercase , __lowercase ) def lowercase__ ( __lowercase : int , __lowercase : List[Any] , __lowercase : Dict ) -> int: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(__lowercase , __lowercase , __lowercase ) def lowercase__ ( __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : str ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __UpperCamelCase = inits[i].name __UpperCamelCase = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , __lowercase , __lowercase ) def lowercase__ ( __lowercase : Dict ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = os.path.dirname(__lowercase ) __UpperCamelCase = os.path.basename(__lowercase ) __UpperCamelCase = onnx.load(os.path.join(__lowercase , __lowercase ) ) __UpperCamelCase = list(model.graph.initializer ) __UpperCamelCase = set() __UpperCamelCase = {} __UpperCamelCase = [] __UpperCamelCase = 0 for i in range(len(__lowercase ) ): if i in dup_set: continue for j in range(i + 1 , len(__lowercase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(__lowercase ) dup_set.add(__lowercase ) __UpperCamelCase = inits[j].data_type __UpperCamelCase = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , __lowercase ) total_reduced_size += mem_size __UpperCamelCase = inits[i].name __UpperCamelCase = inits[j].name if name_i in dup_map: dup_map[name_i].append(__lowercase ) else: __UpperCamelCase = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) __UpperCamelCase = sorted(__lowercase ) _remove_dup_initializers_from_model(__lowercase , __lowercase , __lowercase ) __UpperCamelCase = 'optimized_' + model_file_name __UpperCamelCase = os.path.join(__lowercase , __lowercase ) onnx.save(__lowercase , __lowercase ) return new_model
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
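# ----------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): within the
# transformers agents framework a PipelineTool is callable; the image file
# and question below are illustrative only.
# ----------------------------------------------------------------------
# from PIL import Image
#
# tool = ImageQuestionAnsweringTool()
# image = Image.open("photo.jpg")  # hypothetical local file
# print(tool(image, "How many dogs are in the picture?"))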
import random


def _partition(data: list, pivot) -> tuple:
    """
    Three-way partition of ``data`` into values smaller than, equal to and
    greater than ``pivot``.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """
    Return the ``index``-th smallest element of ``items`` (0-based) in
    expected O(n) time, or None if the index is out of range.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
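# ----------------------------------------------------------------------
# Editor's usage sketch (not part of the original file): quick_select is
# typically used to pick order statistics such as the median without fully
# sorting the list.
# ----------------------------------------------------------------------
data = [2, 4, 5, 7, 899, 54, 32]
median = quick_select(data, len(data) // 2)  # 4th smallest of 7 values
assert median == sorted(data)[len(data) // 2]
print(median)  # 7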
'''simple docstring''' import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { '''nvidia/segformer-b0-finetuned-ade-512-512''': ( '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json''' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class _lowerCAmelCase ( __lowerCamelCase ): """simple docstring""" lowerCamelCase = "segformer" def __init__( self , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=[2, 2, 2, 2] , _lowerCamelCase=[8, 4, 2, 1] , _lowerCamelCase=[32, 64, 160, 256] , _lowerCamelCase=[7, 3, 3, 3] , _lowerCamelCase=[4, 2, 2, 2] , _lowerCamelCase=[1, 2, 5, 8] , _lowerCamelCase=[4, 4, 4, 4] , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=0.02 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-6 , _lowerCamelCase=256 , _lowerCamelCase=255 , **_lowerCamelCase , ) -> Tuple: super().__init__(**__A ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , __A , ) A_ : Optional[Any] = num_channels A_ : Tuple = num_encoder_blocks A_ : str = depths A_ : List[Any] = sr_ratios A_ : List[Any] = hidden_sizes A_ : Optional[Any] = patch_sizes A_ : Optional[Any] = strides A_ : List[str] = mlp_ratios A_ : str = num_attention_heads A_ : Optional[int] = hidden_act A_ : List[Any] = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : str = classifier_dropout_prob A_ : str = initializer_range A_ : str = drop_path_rate A_ : List[str] = layer_norm_eps A_ : Any = decoder_hidden_size A_ : List[Any] = kwargs.get("""reshape_last_stage""" , __A ) A_ : int = semantic_loss_ignore_index class _lowerCAmelCase ( __lowerCamelCase ): """simple docstring""" lowerCamelCase = version.parse('''1.11''' ) @property def UpperCAmelCase_ ( self ) -> Optional[int]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase_ ( self ) -> int: return 1e-4 @property def UpperCAmelCase_ ( self ) -> Dict: return 12
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowercase__ ( __lowercase : Any ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def lowercase__ ( __lowercase : Tuple ) -> int: """simple docstring""" __UpperCamelCase , __UpperCamelCase = emb.weight.shape __UpperCamelCase = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) __UpperCamelCase = emb.weight.data return lin_layer def lowercase__ ( __lowercase : int , __lowercase : List[str]="facebook/mbart-large-en-ro" , __lowercase : str=False , __lowercase : List[Any]=False ) -> int: """simple docstring""" __UpperCamelCase = torch.load(__lowercase , map_location='cpu' )['model'] remove_ignore_keys_(__lowercase ) __UpperCamelCase = state_dict['encoder.embed_tokens.weight'].shape[0] __UpperCamelCase = MBartConfig.from_pretrained(__lowercase , vocab_size=__lowercase ) if mbart_aa and finetuned: __UpperCamelCase = 'relu' __UpperCamelCase = state_dict['decoder.embed_tokens.weight'] __UpperCamelCase = MBartForConditionalGeneration(__lowercase ) model.model.load_state_dict(__lowercase ) if finetuned: __UpperCamelCase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": a__ : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') a__ : Union[str, Any] =parser.parse_args() a__ : str =convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """
    A pseudorandom number generator based on the recurrence
    seed = (multiplier * seed + increment) % modulo.
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """
        These parameters are saved and used when next_number() is called.
        modulo is the largest number that can be generated (exclusive).
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """
        The smallest number that can be generated is zero.
        The largest number that can be generated is modulo - 1.
        """
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
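# ----------------------------------------------------------------------
# Editor's note (not part of the original file): the constants above
# (a=1664525, c=1013904223, m=2**32) are the Numerical Recipes LCG
# parameters.  A quick check of X_{n+1} = (a*X_n + c) mod m from seed 0,
# as a standalone sketch:
# ----------------------------------------------------------------------
# X_1 = (1664525 * 0 + 1013904223) % 2**32 = 1013904223
demo = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
assert demo.next_number() == 1013904223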
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : Any , __A : Dict , __A : str , __A : List[Any]=1_0_2_4 , __A : Tuple=1_0_2_4 , __A : str=3.6 ): __UpperCamelCase = tokenizer __UpperCamelCase = tokenizer.bos_token_id __UpperCamelCase = dataset __UpperCamelCase = seq_length __UpperCamelCase = seq_length * chars_per_token * num_of_sequences def __iter__( self : Any ): __UpperCamelCase = iter(self.dataset ) __UpperCamelCase = True while more_examples: __UpperCamelCase , __UpperCamelCase = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(__A )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: __UpperCamelCase = False break __UpperCamelCase = tokenizer(__A , truncation=__A )['input_ids'] __UpperCamelCase = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(__A ) , self.seq_length ): __UpperCamelCase = all_token_ids[i : i + self.seq_length] if len(__A ) == self.seq_length: yield torch.tensor(__A ) def lowercase__ ( __lowercase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = {'streaming': True} __UpperCamelCase = load_dataset(args.dataset_name , split='train' , **__lowercase ) __UpperCamelCase = ConstantLengthDataset(__lowercase , __lowercase , seq_length=args.seq_length ) __UpperCamelCase = DataLoader(__lowercase , batch_size=args.batch_size ) return eval_dataloader def lowercase__ ( __lowercase : Tuple ) -> Optional[Any]: """simple docstring""" model.eval() __UpperCamelCase = [] for step, batch in enumerate(__lowercase ): with torch.no_grad(): __UpperCamelCase = model(__lowercase , labels=__lowercase ) __UpperCamelCase = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__lowercase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __UpperCamelCase = torch.mean(torch.cat(__lowercase ) ) try: __UpperCamelCase = torch.exp(__lowercase ) except OverflowError: __UpperCamelCase = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator a__ : int =Accelerator() # Parse configuration a__ : Dict =HfArgumentParser(EvaluationArguments) a__ : Union[str, Any] =parser.parse_args() set_seed(args.seed) # Logging a__ : List[Any] =logging.getLogger(__name__) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) # Load model and tokenizer a__ : Union[str, Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt) a__ : List[Any] =AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader a__ : Union[str, Any] =create_dataloader(args) # Prepare everything with our `accelerator`. a__ , a__ : List[str] =accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info('''Evaluating and saving model after training''') a__ , a__ : Any =evaluate(args) logger.info(f'loss/eval: {eval_loss}, perplexity: {perplexity}')
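# ----------------------------------------------------------------------
# Editor's note (not part of the original file): the evaluate() loop above
# reports perplexity as exp(mean cross-entropy loss).  A tiny standalone
# check of that relationship:
# ----------------------------------------------------------------------
import torch

losses = torch.tensor([2.0, 2.5, 3.0])  # per-batch language-modeling losses
mean_loss = losses.mean()                # 2.5
perplexity = torch.exp(mean_loss)        # e**2.5 ≈ 12.18
print(mean_loss.item(), perplexity.item())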
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Check whether digit ``n`` can be placed at (row, column) without
    duplicating it in the row, the column, or the 3x3 subgrid.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """
    Find an empty cell (marked 0) to assign next, or None if the grid is full.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Fill all unassigned locations by backtracking; return the solved grid,
    or None if no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0  # undo the assignment and backtrack

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a__ : Any =logging.get_logger(__name__) a__ : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict ="gpt_neo" SCREAMING_SNAKE_CASE_ : Optional[int] =["past_key_values"] SCREAMING_SNAKE_CASE_ : List[Any] ={"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , __A : Union[str, Any]=5_0_2_5_7 , __A : Any=2_0_4_8 , __A : Optional[Any]=2_0_4_8 , __A : Any=2_4 , __A : Union[str, Any]=[[["global", "local"], 1_2]] , __A : str=1_6 , __A : Optional[int]=None , __A : Union[str, Any]=2_5_6 , __A : Any="gelu_new" , __A : Dict=0.0 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : List[str]=0.1 , __A : Any=1e-5 , __A : int=0.02 , __A : List[str]=True , __A : Tuple=5_0_2_5_6 , __A : Optional[Any]=5_0_2_5_6 , **__A : Optional[Any] , ): __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = hidden_size __UpperCamelCase = num_layers __UpperCamelCase = num_heads __UpperCamelCase = intermediate_size __UpperCamelCase = window_size __UpperCamelCase = activation_function __UpperCamelCase = resid_dropout __UpperCamelCase = embed_dropout __UpperCamelCase = attention_dropout __UpperCamelCase = classifier_dropout __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = initializer_range __UpperCamelCase = use_cache __UpperCamelCase = bos_token_id __UpperCamelCase = eos_token_id __UpperCamelCase = attention_types __UpperCamelCase = self.expand_attention_types_params(__A ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' 
) super().__init__(bos_token_id=__A , eos_token_id=__A , **__A ) @staticmethod def _lowerCamelCase ( __A : Tuple ): __UpperCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowercase__ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[str] ) -> Any: """simple docstring""" import torch __UpperCamelCase = input.size() __UpperCamelCase = len(__lowercase ) __UpperCamelCase = shape[dimension] __UpperCamelCase = torch.arange(0 , __lowercase , __lowercase ) __UpperCamelCase = torch.div(sizedim - size , __lowercase , rounding_mode='floor' ) + 1 __UpperCamelCase = torch.arange(__lowercase ) + low_indices[:min_length][:, None] __UpperCamelCase = [slice(__lowercase )] * rank __UpperCamelCase = indices __UpperCamelCase = input[s] __UpperCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowercase ) def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]: """simple docstring""" import torch __UpperCamelCase = torch.arange(1 , __lowercase ) __UpperCamelCase = torch.remainder(__lowercase , __lowercase ) __UpperCamelCase = remainders == 0 __UpperCamelCase = candidates[divisor_indices] __UpperCamelCase = torch.max(__lowercase ) return largest_divisor, torch.div(__lowercase , __lowercase , rounding_mode='floor' ) class snake_case ( __lowerCamelCase ): """simple docstring""" @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) __UpperCamelCase = {0: 'batch', 1: 'past_sequence + sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'sequence'} return common_inputs @property def _lowerCamelCase ( self : int ): return self._config.num_heads def _lowerCamelCase ( self : List[str] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ): __UpperCamelCase = super(__A , self ).generate_dummy_inputs( __A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A ) # We need to order the input in the way they appears in the forward() __UpperCamelCase = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCamelCase = [ (torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers ) ] __UpperCamelCase = common_inputs['attention_mask'] if self.use_past: __UpperCamelCase = ordered_inputs['attention_mask'].dtype __UpperCamelCase = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 ) return ordered_inputs @property def _lowerCamelCase ( self : Dict ): return 1_3
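# Added usage sketch, assuming the upstream class name ``GPTNeoConfig`` from the
# transformers library that this configuration mirrors: ``attention_types``
# expands to one attention flavour per layer via ``expand_attention_types_params``.
from transformers import GPTNeoConfig

config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
print(config.attention_layers)  # ['global', 'local', 'global', 'local']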
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def lowerCamelCase__ ( _A ): a : List[Any] = 384 if "tiny" in model_name: a : List[str] = [3, 3, 9, 3] a : Optional[int] = [96, 192, 384, 768] if "small" in model_name: a : int = [3, 3, 27, 3] a : Dict = [96, 192, 384, 768] if "base" in model_name: a : Union[str, Any] = [3, 3, 27, 3] a : str = [128, 256, 512, 1024] a : Any = 512 if "large" in model_name: a : Optional[Any] = [3, 3, 27, 3] a : Optional[Any] = [192, 384, 768, 1536] a : Optional[Any] = 768 if "xlarge" in model_name: a : str = [3, 3, 27, 3] a : Optional[Any] = [256, 512, 1024, 2048] a : List[str] = 1024 # set label information a : List[str] = 150 a : Union[str, Any] = 'huggingface/label-files' a : int = 'ade20k-id2label.json' a : Optional[int] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) ) a : Tuple = {int(__lowercase ): v for k, v in idalabel.items()} a : List[str] = {v: k for k, v in idalabel.items()} a : List[Any] = ConvNextConfig( depths=__lowercase , hidden_sizes=__lowercase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) a : List[str] = UperNetConfig( backbone_config=__lowercase , auxiliary_in_channels=__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase , ) return config def lowerCamelCase__ ( _A ): a : Optional[Any] = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') ) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') ) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') ) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") ) if i > 0: rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", 
f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def lowerCamelCase__ ( _A , _A , _A ): a : Tuple = dct.pop(__lowercase ) a : List[Any] = val def lowerCamelCase__ ( _A , _A , _A ): a : Tuple = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } a : Optional[Any] = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(__lowercase , map_location='cpu' )['state_dict'] a : Dict = get_upernet_config(__lowercase ) a : Dict = UperNetForSemanticSegmentation(__lowercase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): a : str = state_dict.pop(__lowercase ) if "bn" in key: a : Union[str, Any] = key.replace('bn' , 'batch_norm' ) a : int = val # rename keys a : Optional[Any] = create_rename_keys(__lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) model.load_state_dict(__lowercase ) # verify on image a : Optional[Any] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' a : Any = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('RGB' ) a : str = SegformerImageProcessor() a : Any = processor(__lowercase , return_tensors='pt' ).pixel_values with torch.no_grad(): a : List[str] = model(__lowercase ) if model_name == "upernet-convnext-tiny": a : List[Any] = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": a : int = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": a : Optional[int] = torch.tensor( [[-8.8558, -8.8558, 
-8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": a : str = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": a : Any = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowercase , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__lowercase ) if push_to_hub: print(f"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(f"""openmmlab/{model_name}""" ) processor.push_to_hub(f"""openmmlab/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase: Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[F"upernet-convnext-{size}" for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) lowerCAmelCase: List[Any] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
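# Added usage sketch: PipelineTool.__call__ forwards its arguments to ``encode``
# above, so the tool can be invoked directly; the image path is hypothetical.
from PIL import Image

tool = DocumentQuestionAnsweringTool()
answer = tool(document=Image.open("invoice.png"), question="What is the total amount?")
print(answer)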
"""simple docstring""" def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> int: if not numbers: return 0 if not isinstance(__lowercase ,(list, tuple) ) or not all( isinstance(__lowercase ,__lowercase ) for number in numbers ): raise ValueError("""numbers must be an iterable of integers""" ) _lowerCAmelCase : List[str] = numbers[0] for i in range(1 ,len(__lowercase ) ): # update the maximum and minimum subarray products _lowerCAmelCase : Any = numbers[i] if number < 0: _lowerCAmelCase , _lowerCAmelCase : Tuple = min_till_now, max_till_now _lowerCAmelCase : Dict = max(__lowercase ,max_till_now * number ) _lowerCAmelCase : Tuple = min(__lowercase ,min_till_now * number ) # update the maximum product found till now _lowerCAmelCase : str = max(__lowercase ,__lowercase ) return max_prod
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ): __lowerCAmelCase : Any = ["image_processor", "tokenizer"] __lowerCAmelCase : List[Any] = "BlipImageProcessor" __lowerCAmelCase : Optional[int] = ("BertTokenizer", "BertTokenizerFast") def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Optional[Any] = False super().__init__(__A , __A ) UpperCAmelCase : int = self.image_processor def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Tuple: '''simple docstring''' if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None: UpperCAmelCase : Any = self.tokenizer UpperCAmelCase : Optional[Any] = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding # add pixel_values UpperCAmelCase : List[Any] = self.image_processor(__A , return_tensors=__A ) if text is not None: UpperCAmelCase : Dict = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) else: UpperCAmelCase : Dict = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' return self.tokenizer.batch_decode(*__A , **__A ) def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' return self.tokenizer.decode(*__A , **__A ) @property def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[Any] = self.tokenizer.model_input_names UpperCAmelCase : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a writer batch size (the maximum Parquet row group size) so that
    row groups with image/audio/binary data stay small enough for fast random access."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset's Arrow table as Parquet to an open binary file handle."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
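# Added round-trip sketch via the public wrappers of the reader/writer above
# (``Dataset.to_parquet`` / ``Dataset.from_parquet``); the file name is illustrative.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_parquet("example.parquet")
print(Dataset.from_parquet("example.parquet").num_rows)  # 2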
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
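# Added standalone illustration (runnable outside pytest) of the round trip the
# first test exercises.
sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
reloaded_train = SplitDict._from_yaml_list(sd._to_yaml_list())["train"]
assert reloaded_train.num_examples == 42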
from __future__ import annotations class lowercase__ : def __init__( self : Optional[int] , UpperCamelCase__ : list[list[int]] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = TypeError( '''Matrices must be formed from a list of zero or more lists containing at ''' '''least one and the same number of values, each of which must be of type ''' '''int or float.''' ) if len(__A ) != 0: SCREAMING_SNAKE_CASE : Optional[int] = len(rows[0] ) if cols == 0: raise error for row in rows: if len(__A ) != cols: raise error for value in row: if not isinstance(__A , (int, float) ): raise error SCREAMING_SNAKE_CASE : int = rows else: SCREAMING_SNAKE_CASE : int = [] def __A ( self : int ): '''simple docstring''' return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def __A ( self : str ): '''simple docstring''' return len(self.rows ) @property def __A ( self : Any ): '''simple docstring''' return len(self.rows[0] ) @property def __A ( self : Optional[Any] ): '''simple docstring''' return (self.num_rows, self.num_columns) @property def __A ( self : Dict ): '''simple docstring''' return self.order[0] == self.order[1] def __A ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(__A ) def __A ( self : Any ): '''simple docstring''' if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def __A ( self : List[str] ): '''simple docstring''' return bool(self.determinant() ) def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(__A ).determinant() def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int ): '''simple docstring''' if (row + column) % 2 == 0: return self.get_minor(__A , __A ) return -1 * self.get_minor(__A , __A ) def __A ( self : List[str] ): '''simple docstring''' return Matrix( [ [self.get_minor(__A , __A ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def __A ( self : Union[str, Any] ): '''simple docstring''' return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def __A ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(__A ) def __A ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.determinant() if not determinant: raise TypeError('''Only matrices with a non-zero determinant have an inverse''' ) return self.adjugate() * (1 / determinant) def __repr__( self : Optional[Any] ): '''simple docstring''' return str(self.rows ) def __str__( self : Union[str, Any] ): '''simple docstring''' if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". 
".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '''[''' + '''. '''.join([str(__A ) for value in row] ) + '''.]''' for row in self.rows ] ) + "]" ) def __A ( self : List[Any] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int | None = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = TypeError('''Row must be a list containing all ints and/or floats''' ) if not isinstance(__A , __A ): raise type_error for value in row: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_columns: raise ValueError( '''Row must be equal in length to the other rows in the matrix''' ) if position is None: self.rows.append(__A ) else: SCREAMING_SNAKE_CASE : List[str] = self.rows[0:position] + [row] + self.rows[position:] def __A ( self : Optional[Any] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int | None = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = TypeError( '''Column must be a list containing all ints and/or floats''' ) if not isinstance(__A , __A ): raise type_error for value in column: if not isinstance(__A , (int, float) ): raise type_error if len(__A ) != self.num_rows: raise ValueError( '''Column must be equal in length to the other columns in the matrix''' ) if position is None: SCREAMING_SNAKE_CASE : Tuple = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: SCREAMING_SNAKE_CASE : Optional[Any] = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : Tuple , UpperCamelCase__ : object ): '''simple docstring''' if not isinstance(__A , __A ): return NotImplemented return self.rows == other.rows def __ne__( self : Any , UpperCamelCase__ : object ): '''simple docstring''' return not self == other def __neg__( self : List[Any] ): '''simple docstring''' return self * -1 def __add__( self : List[str] , UpperCamelCase__ : Matrix ): '''simple docstring''' if self.order != other.order: raise ValueError('''Addition requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : str , UpperCamelCase__ : Matrix ): '''simple docstring''' if self.order != other.order: raise ValueError('''Subtraction requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self : str , UpperCamelCase__ : Matrix | int | float ): '''simple docstring''' if isinstance(__A , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(__A , __A ): if self.num_columns != other.num_rows: raise ValueError( '''The number of columns in the first matrix must ''' '''be equal to the number of rows in the second''' ) return Matrix( [ [Matrix.dot_product(__A , __A ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( '''A Matrix can only be multiplied by an int, float, or another matrix''' ) def __pow__( self : Union[str, Any] , UpperCamelCase__ : int ): '''simple docstring''' if not isinstance(__A , __A ): raise TypeError('''A Matrix can only be raised to the power of an int''' ) if not self.is_square: raise ValueError('''Only square matrices can be raised to a power''' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( '''Only invertable matrices can be raised to a negative 
power''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = self for _ in range(other - 1 ): result *= self return result @classmethod def __A ( cls : Tuple , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] ): '''simple docstring''' return sum(row[i] * column[i] for i in range(len(__A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
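# Added usage sketch, assuming the class defined above is exposed under its
# upstream name ``Matrix`` (the name its own method bodies already reference).
m = Matrix([[2, 1], [1, 1]])
print(m.determinant())         # 1
print((m * m.inverse()).rows)  # [[1, 0], [0, 1]] -- the identity, as expected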
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a__ : str =logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str =["input_features", "attention_mask"] def __init__( self : Union[str, Any] , __A : Optional[int]=8_0 , __A : Tuple=1_6_0_0_0 , __A : Optional[Any]=8_0 , __A : Any=0.0 , __A : Any=True , __A : List[str]=True , __A : str=True , **__A : List[Any] , ): super().__init__(feature_size=__A , sampling_rate=__A , padding_value=__A , **__A ) __UpperCamelCase = num_mel_bins __UpperCamelCase = do_ceptral_normalize __UpperCamelCase = normalize_means __UpperCamelCase = normalize_vars __UpperCamelCase = True def _lowerCamelCase ( self : Union[str, Any] , __A : np.ndarray , ): __UpperCamelCase = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers __UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 ) __UpperCamelCase = ta_kaldi.fbank(__A , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _lowerCamelCase ( __A : np.ndarray , __A : int , __A : Optional[bool] = True , __A : Optional[bool] = True , __A : float = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: __UpperCamelCase = x[:input_length].mean(axis=0 ) __UpperCamelCase = np.subtract(__A , __A ) if normalize_vars: __UpperCamelCase = x[:input_length].std(axis=0 ) __UpperCamelCase = np.divide(__A , __A ) if input_length < x.shape[0]: __UpperCamelCase = padding_value # make sure array is in float32 __UpperCamelCase = x.astype(np.floataa ) return x def _lowerCamelCase ( self : int , __A : List[np.ndarray] , __A : Optional[np.ndarray] = None ): __UpperCamelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(__A , __A , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(__A , __A ) ] def __call__( self : List[Any] , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : Union[bool, str, PaddingStrategy] = False , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , __A : Optional[int] = None , __A : Optional[bool] = None , **__A : Dict , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) __UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) __UpperCamelCase = is_batched_numpy or ( isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__A , np.ndarray ): __UpperCamelCase = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __UpperCamelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCamelCase = [raw_speech] # extract fbank features __UpperCamelCase = [self._extract_fbank_features(__A ) for waveform in raw_speech] # convert into correct format for padding __UpperCamelCase = BatchFeature({'input_features': features} ) __UpperCamelCase = self.pad( __A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , ) # make sure list is in array format __UpperCamelCase = padded_inputs.get('input_features' ) if isinstance(input_features[0] , __A ): __UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_features] __UpperCamelCase = padded_inputs.get('attention_mask' ) if attention_mask is not None: __UpperCamelCase = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: __UpperCamelCase = ( np.array(__A , dtype=np.intaa ) if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD else None ) __UpperCamelCase = self.normalize( padded_inputs['input_features'] , attention_mask=__A ) if return_tensors is not None: __UpperCamelCase = padded_inputs.convert_to_tensors(__A ) return padded_inputs
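# Added usage sketch via the upstream transformers class this extractor
# corresponds to (Speech2TextFeatureExtractor; requires torchaudio for the
# Kaldi fbank features). The one-second random signal is illustrative.
import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor()  # defaults: 80 mel bins, 16 kHz
speech = np.random.randn(16000).astype(np.float32)
features = extractor(speech, sampling_rate=16000, return_tensors="pt")
print(features["input_features"].shape)  # (1, num_frames, 80)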
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Compute the Schur complement of the block matrix [[A, B], [B.T, C]]
    with respect to the block A, i.e. C - B.T @ A^-1 @ B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        # B has four rows while A has three, so schur_complement must raise
        b = np.array([[0, 3], [3, 0], [2, 3], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        # C has three columns while B has two, so schur_complement must raise
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
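# Added numerical check of the block-determinant identity behind the tests:
# det([[A, B], [B.T, C]]) == det(A) * det(schur_complement(A, B, C)).
rng = np.random.default_rng(0)
a = rng.standard_normal((3, 3)) + 3 * np.eye(3)  # keep A comfortably invertible
b = rng.standard_normal((3, 2))
c = rng.standard_normal((2, 2))
block = np.block([[a, b], [b.T, c]])
s = schur_complement(a, b, c)
assert np.isclose(np.linalg.det(block), np.linalg.det(a) * np.linalg.det(s))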
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : List[Any] =logging.get_logger(__name__) a__ : List[Any] ={ '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_text_model" def __init__( self : str , __A : List[Any]=2_5_0_0_0_2 , __A : Any=1_0_2_4 , __A : int=2_4 , __A : Dict=1_6 , __A : Optional[Any]=4_0_9_6 , __A : Union[str, Any]="gelu" , __A : Dict=0.1 , __A : Dict=0.1 , __A : List[str]=5_1_4 , __A : Optional[int]=1 , __A : int=0.02 , __A : Optional[Any]=0.02 , __A : Optional[Any]=1e-05 , __A : Dict=1 , __A : List[Any]=0 , __A : int=2 , __A : Tuple="absolute" , __A : Optional[Any]=True , __A : Optional[int]=7_6_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = project_dim class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple ="altclip_vision_model" def __init__( self : List[Any] , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=3_0_7_2 , __A : Optional[Any]=5_1_2 , __A : Tuple=1_2 , __A : Union[str, Any]=1_2 , __A : Optional[int]=3 , __A : Dict=2_2_4 , __A : Tuple=3_2 , __A : str="quick_gelu" , __A : Dict=1e-5 , __A : Optional[int]=0.0 , __A : List[Any]=0.02 , __A : int=1.0 , **__A : Optional[int] , ): super().__init__(**__A ) __UpperCamelCase = hidden_size __UpperCamelCase = intermediate_size __UpperCamelCase = projection_dim __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = num_channels __UpperCamelCase = patch_size __UpperCamelCase = image_size __UpperCamelCase = initializer_range __UpperCamelCase = initializer_factor __UpperCamelCase = attention_dropout __UpperCamelCase = layer_norm_eps __UpperCamelCase = hidden_act @classmethod def _lowerCamelCase ( cls : Optional[Any] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) __UpperCamelCase , __UpperCamelCase = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('model_type' ) == "altclip": __UpperCamelCase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] ="altclip" SCREAMING_SNAKE_CASE_ : Optional[int] =True def __init__( self : Any , __A : List[str]=None , __A : List[Any]=None , __A : List[str]=7_6_8 , __A : List[str]=2.6592 , **__A : Dict ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). __UpperCamelCase = kwargs.pop('text_config_dict' , __A ) __UpperCamelCase = kwargs.pop('vision_config_dict' , __A ) super().__init__(**__A ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __UpperCamelCase = {} # This is the complete result when using `text_config_dict`. __UpperCamelCase = AltCLIPTextConfig(**__A ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __UpperCamelCase = {} # This is the complete result when using `vision_config_dict`. __UpperCamelCase = AltCLIPVisionConfig(**__A ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __UpperCamelCase = { str(__A ): value for key, value in _vision_config_dict['id2label'].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __UpperCamelCase = ( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __UpperCamelCase = ( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(__A ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __UpperCamelCase = {} logger.info('`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values.' ) if vision_config is None: __UpperCamelCase = {} logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' ) __UpperCamelCase = AltCLIPTextConfig(**__A ) __UpperCamelCase = AltCLIPVisionConfig(**__A ) __UpperCamelCase = projection_dim __UpperCamelCase = logit_scale_init_value __UpperCamelCase = 1.0 @classmethod def _lowerCamelCase ( cls : Union[str, Any] , __A : AltCLIPTextConfig , __A : AltCLIPVisionConfig , **__A : Optional[Any] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = copy.deepcopy(self.__dict__ ) __UpperCamelCase = self.text_config.to_dict() __UpperCamelCase = self.vision_config.to_dict() __UpperCamelCase = self.__class__.model_type return output
53
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters lowerCamelCase__ = False lowerCamelCase__ = False def __lowerCAmelCase (__lowerCAmelCase ): return TrainCommand(__lowercase ) class lowerCAmelCase__ ( __lowerCamelCase ): @staticmethod def lowerCAmelCase__ ( lowerCamelCase__ : ArgumentParser ) ->int: '''simple docstring''' _UpperCAmelCase : int = parser.add_parser("train" , help="CLI tool to train a model on a task." ) train_parser.add_argument( "--train_data" , type=__A , required=__A , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , ) train_parser.add_argument( "--column_label" , type=__A , default=0 , help="Column of the dataset csv file with example labels." ) train_parser.add_argument( "--column_text" , type=__A , default=1 , help="Column of the dataset csv file with example texts." ) train_parser.add_argument( "--column_id" , type=__A , default=2 , help="Column of the dataset csv file with example ids." ) train_parser.add_argument( "--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." ) train_parser.add_argument("--validation_data" , type=__A , default="" , help="path to validation dataset." ) train_parser.add_argument( "--validation_split" , type=__A , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , ) train_parser.add_argument("--output" , type=__A , default="./" , help="path to saved the trained model." ) train_parser.add_argument( "--task" , type=__A , default="text_classification" , help="Task to train the model on." ) train_parser.add_argument( "--model" , type=__A , default="bert-base-uncased" , help="Model\'s name or path to stored model." ) train_parser.add_argument("--train_batch_size" , type=__A , default=32 , help="Batch size for training." ) train_parser.add_argument("--valid_batch_size" , type=__A , default=64 , help="Batch size for validation." ) train_parser.add_argument("--learning_rate" , type=__A , default=3E-5 , help="Learning rate." ) train_parser.add_argument("--adam_epsilon" , type=__A , default=1E-08 , help="Epsilon for Adam optimizer." 
) train_parser.set_defaults(func=__A ) def __init__( self : Any , lowerCamelCase__ : Namespace ) ->Optional[Any]: '''simple docstring''' _UpperCAmelCase : List[Any] = logging.get_logger("transformers-cli/training" ) _UpperCAmelCase : int = "tf" if is_tf_available() else "torch" os.makedirs(args.output , exist_ok=__A ) _UpperCAmelCase : str = args.output _UpperCAmelCase : Any = args.column_label _UpperCAmelCase : int = args.column_text _UpperCAmelCase : List[str] = args.column_id self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" ) if args.task == "text_classification": _UpperCAmelCase : Union[str, Any] = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"""Loading dataset from {args.train_data}""" ) _UpperCAmelCase : Optional[Any] = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) _UpperCAmelCase : List[str] = None if args.validation_data: self.logger.info(F"""Loading validation dataset from {args.validation_data}""" ) _UpperCAmelCase : Tuple = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) _UpperCAmelCase : Union[str, Any] = args.validation_split _UpperCAmelCase : List[str] = args.train_batch_size _UpperCAmelCase : Optional[int] = args.valid_batch_size _UpperCAmelCase : Optional[Any] = args.learning_rate _UpperCAmelCase : Optional[int] = args.adam_epsilon def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]: '''simple docstring''' if self.framework == "tf": return self.run_tf() return self.run_torch() def lowerCAmelCase__ ( self : List[Any] ) ->Tuple: '''simple docstring''' raise NotImplementedError def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]: '''simple docstring''' self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load the configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    # The entity vocab is a TSV file of "<title>\t<index>" lines; keep the title -> index mapping
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
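# Usage sketch (added for illustration): assuming the script above is saved as
# convert_luke_original_pytorch_checkpoint_to_pytorch.py, a conversion run could
# look like the following; all paths are placeholders, only the flag names come
# from the argparse definitions above.
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path pytorch_model.bin \
#       --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted-luke \
#       --model_size base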
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables,
        num_layers=config.num_layers,
        is_encoder_only=is_encoder_only,
        scalable_attention=scalable_attention,
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
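# Usage sketch (added for illustration): assuming the script above is saved as
# convert_umt5_checkpoint_to_pytorch.py, a conversion run could look like the
# following; all paths are placeholders, only the flag names come from the
# argparse definitions above.
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./converted-umt5 \
#       --scalable_attention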
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
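# Usage sketch (added for illustration): this module runs under pytest like any
# other transformers test file; the path below is an assumption about where the
# file lives in the repository.
#
#   python -m pytest tests/models/rag/test_retrieval_rag.py -q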